2 * NET3 Protocol independent device support routines.
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation; either version
7 * 2 of the License, or (at your option) any later version.
9 * Derived from the non IP parts of dev.c 1.0.19
11 * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
12 * Mark Evans, <evansmp@uhura.aston.ac.uk>
15 * Florian la Roche <rzsfl@rz.uni-sb.de>
16 * Alan Cox <gw4pts@gw4pts.ampr.org>
17 * David Hinds <dahinds@users.sourceforge.net>
18 * Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
19 * Adam Sulmicki <adam@cfar.umd.edu>
20 * Pekka Riikonen <priikone@poesidon.pspt.fi>
23 * D.J. Barrow : Fixed bug where dev->refcnt gets set
24 * to 2 if register_netdev gets called
25 * before net_dev_init & also removed a
26 * few lines of code in the process.
27 * Alan Cox : device private ioctl copies fields back.
28 * Alan Cox : Transmit queue code does relevant
29 * stunts to keep the queue safe.
30 * Alan Cox : Fixed double lock.
31 * Alan Cox : Fixed promisc NULL pointer trap
32 * ???????? : Support the full private ioctl range
33 * Alan Cox : Moved ioctl permission check into
35 * Tim Kordas : SIOCADDMULTI/SIOCDELMULTI
36 * Alan Cox : 100 backlog just doesn't cut it when
37 * you start doing multicast video 8)
38 * Alan Cox : Rewrote net_bh and list manager.
39 * Alan Cox : Fix ETH_P_ALL echoback lengths.
40 * Alan Cox : Took out transmit every packet pass
41 * Saved a few bytes in the ioctl handler
42 * Alan Cox : Network driver sets packet type before
43 * calling netif_rx. Saves a function
45 * Alan Cox : Hashed net_bh()
46 * Richard Kooijman: Timestamp fixes.
47 * Alan Cox : Wrong field in SIOCGIFDSTADDR
48 * Alan Cox : Device lock protection.
49 * Alan Cox : Fixed nasty side effect of device close
51 * Rudi Cilibrasi : Pass the right thing to
53 * Dave Miller : 32bit quantity for the device lock to
54 * make it work out on a Sparc.
55 * Bjorn Ekwall : Added KERNELD hack.
56 * Alan Cox : Cleaned up the backlog initialise.
57 * Craig Metz : SIOCGIFCONF fix if space for under
59 * Thomas Bogendoerfer : Return ENODEV for dev_open, if there
60 * is no device open function.
61 * Andi Kleen : Fix error reporting for SIOCGIFCONF
62 * Michael Chastain : Fix signed/unsigned for SIOCGIFCONF
63 * Cyrus Durgin : Cleaned for KMOD
64 * Adam Sulmicki : Bug Fix : Network Device Unload
65 * A network device unload needs to purge
67 * Paul Rusty Russell : SIOCSIFNAME
68 * Pekka Riikonen : Netdev boot-time settings code
69 * Andrew Morton : Make unregister_netdevice wait
70 * indefinitely on dev->refcnt
71 * J Hadi Salim : - Backlog queue sampling
72 * - netif_rx() feedback
75 #include <asm/uaccess.h>
76 #include <asm/system.h>
77 #include <linux/bitops.h>
78 #include <linux/capability.h>
79 #include <linux/cpu.h>
80 #include <linux/types.h>
81 #include <linux/kernel.h>
82 #include <linux/hash.h>
83 #include <linux/sched.h>
84 #include <linux/mutex.h>
85 #include <linux/string.h>
87 #include <linux/socket.h>
88 #include <linux/sockios.h>
89 #include <linux/errno.h>
90 #include <linux/interrupt.h>
91 #include <linux/if_ether.h>
92 #include <linux/netdevice.h>
93 #include <linux/etherdevice.h>
94 #include <linux/ethtool.h>
95 #include <linux/notifier.h>
96 #include <linux/skbuff.h>
97 #include <net/net_namespace.h>
99 #include <linux/rtnetlink.h>
100 #include <linux/proc_fs.h>
101 #include <linux/seq_file.h>
102 #include <linux/stat.h>
103 #include <linux/if_bridge.h>
104 #include <linux/if_macvlan.h>
106 #include <net/pkt_sched.h>
107 #include <net/checksum.h>
108 #include <net/xfrm.h>
109 #include <linux/highmem.h>
110 #include <linux/init.h>
111 #include <linux/kmod.h>
112 #include <linux/module.h>
113 #include <linux/netpoll.h>
114 #include <linux/rcupdate.h>
115 #include <linux/delay.h>
116 #include <net/wext.h>
117 #include <net/iw_handler.h>
118 #include <asm/current.h>
119 #include <linux/audit.h>
120 #include <linux/dmaengine.h>
121 #include <linux/err.h>
122 #include <linux/ctype.h>
123 #include <linux/if_arp.h>
124 #include <linux/if_vlan.h>
125 #include <linux/ip.h>
127 #include <linux/ipv6.h>
128 #include <linux/in.h>
129 #include <linux/jhash.h>
130 #include <linux/random.h>
131 #include <trace/events/napi.h>
133 #include "net-sysfs.h"
135 /* Instead of increasing this, you should create a hash table. */
136 #define MAX_GRO_SKBS 8
138 /* This should be increased if a protocol with a bigger head is added. */
139 #define GRO_MAX_HEAD (MAX_HEADER + 128)
142 * The list of packet types we will receive (as opposed to discard)
143 * and the routines to invoke.
145 * Why 16. Because with 16 the only overlap we get on a hash of the
146 * low nibble of the protocol value is RARP/SNAP/X.25.
148 * NOTE: That is no longer true with the addition of VLAN tags. Not
149 * sure which should go first, but I bet it won't make much
150 * difference if we are running VLANs. The good news is that
151 * this protocol won't be in the list unless compiled in, so
152 * the average user (w/out VLANs) will not be adversely affected.
169 #define PTYPE_HASH_SIZE (16)
170 #define PTYPE_HASH_MASK (PTYPE_HASH_SIZE - 1)
172 static DEFINE_SPINLOCK(ptype_lock);
173 static struct list_head ptype_base[PTYPE_HASH_SIZE] __read_mostly;
174 static struct list_head ptype_all __read_mostly; /* Taps */
177 * The @dev_base_head list is protected by @dev_base_lock and the rtnl
180 * Pure readers hold dev_base_lock for reading, or rcu_read_lock()
182 * Writers must hold the rtnl semaphore while they loop through the
183 * dev_base_head list, and hold dev_base_lock for writing when they do the
184 * actual updates. This allows pure readers to access the list even
185 * while a writer is preparing to update it.
187 * To put it another way, dev_base_lock is held for writing only to
188 * protect against pure readers; the rtnl semaphore provides the
189 * protection against other writers.
191 * See, for example usages, register_netdevice() and
192 * unregister_netdevice(), which must be called with the rtnl
195 DEFINE_RWLOCK(dev_base_lock);
196 EXPORT_SYMBOL(dev_base_lock);
198 static inline struct hlist_head *dev_name_hash(struct net *net, const char *name)
200 unsigned hash = full_name_hash(name, strnlen(name, IFNAMSIZ));
201 return &net->dev_name_head[hash_32(hash, NETDEV_HASHBITS)];
204 static inline struct hlist_head *dev_index_hash(struct net *net, int ifindex)
206 return &net->dev_index_head[ifindex & (NETDEV_HASHENTRIES - 1)];
209 /* Device list insertion */
210 static int list_netdevice(struct net_device *dev)
212 struct net *net = dev_net(dev);
216 write_lock_bh(&dev_base_lock);
217 list_add_tail_rcu(&dev->dev_list, &net->dev_base_head);
218 hlist_add_head_rcu(&dev->name_hlist, dev_name_hash(net, dev->name));
219 hlist_add_head_rcu(&dev->index_hlist,
220 dev_index_hash(net, dev->ifindex));
221 write_unlock_bh(&dev_base_lock);
225 /* Device list removal
226 * caller must respect a RCU grace period before freeing/reusing dev
228 static void unlist_netdevice(struct net_device *dev)
232 /* Unlink dev from the device chain */
233 write_lock_bh(&dev_base_lock);
234 list_del_rcu(&dev->dev_list);
235 hlist_del_rcu(&dev->name_hlist);
236 hlist_del_rcu(&dev->index_hlist);
237 write_unlock_bh(&dev_base_lock);
244 static RAW_NOTIFIER_HEAD(netdev_chain);
247 * Device drivers call our routines to queue packets here. We empty the
248 * queue in the local softnet handler.
251 DEFINE_PER_CPU(struct softnet_data, softnet_data);
252 EXPORT_PER_CPU_SYMBOL(softnet_data);
254 #ifdef CONFIG_LOCKDEP
256 * register_netdevice() inits txq->_xmit_lock and sets lockdep class
257 * according to dev->type
259 static const unsigned short netdev_lock_type[] =
260 {ARPHRD_NETROM, ARPHRD_ETHER, ARPHRD_EETHER, ARPHRD_AX25,
261 ARPHRD_PRONET, ARPHRD_CHAOS, ARPHRD_IEEE802, ARPHRD_ARCNET,
262 ARPHRD_APPLETLK, ARPHRD_DLCI, ARPHRD_ATM, ARPHRD_METRICOM,
263 ARPHRD_IEEE1394, ARPHRD_EUI64, ARPHRD_INFINIBAND, ARPHRD_SLIP,
264 ARPHRD_CSLIP, ARPHRD_SLIP6, ARPHRD_CSLIP6, ARPHRD_RSRVD,
265 ARPHRD_ADAPT, ARPHRD_ROSE, ARPHRD_X25, ARPHRD_HWX25,
266 ARPHRD_PPP, ARPHRD_CISCO, ARPHRD_LAPB, ARPHRD_DDCMP,
267 ARPHRD_RAWHDLC, ARPHRD_TUNNEL, ARPHRD_TUNNEL6, ARPHRD_FRAD,
268 ARPHRD_SKIP, ARPHRD_LOOPBACK, ARPHRD_LOCALTLK, ARPHRD_FDDI,
269 ARPHRD_BIF, ARPHRD_SIT, ARPHRD_IPDDP, ARPHRD_IPGRE,
270 ARPHRD_PIMREG, ARPHRD_HIPPI, ARPHRD_ASH, ARPHRD_ECONET,
271 ARPHRD_IRDA, ARPHRD_FCPP, ARPHRD_FCAL, ARPHRD_FCPL,
272 ARPHRD_FCFABRIC, ARPHRD_IEEE802_TR, ARPHRD_IEEE80211,
273 ARPHRD_IEEE80211_PRISM, ARPHRD_IEEE80211_RADIOTAP, ARPHRD_PHONET,
274 ARPHRD_PHONET_PIPE, ARPHRD_IEEE802154,
275 ARPHRD_VOID, ARPHRD_NONE};
277 static const char *const netdev_lock_name[] =
278 {"_xmit_NETROM", "_xmit_ETHER", "_xmit_EETHER", "_xmit_AX25",
279 "_xmit_PRONET", "_xmit_CHAOS", "_xmit_IEEE802", "_xmit_ARCNET",
280 "_xmit_APPLETLK", "_xmit_DLCI", "_xmit_ATM", "_xmit_METRICOM",
281 "_xmit_IEEE1394", "_xmit_EUI64", "_xmit_INFINIBAND", "_xmit_SLIP",
282 "_xmit_CSLIP", "_xmit_SLIP6", "_xmit_CSLIP6", "_xmit_RSRVD",
283 "_xmit_ADAPT", "_xmit_ROSE", "_xmit_X25", "_xmit_HWX25",
284 "_xmit_PPP", "_xmit_CISCO", "_xmit_LAPB", "_xmit_DDCMP",
285 "_xmit_RAWHDLC", "_xmit_TUNNEL", "_xmit_TUNNEL6", "_xmit_FRAD",
286 "_xmit_SKIP", "_xmit_LOOPBACK", "_xmit_LOCALTLK", "_xmit_FDDI",
287 "_xmit_BIF", "_xmit_SIT", "_xmit_IPDDP", "_xmit_IPGRE",
288 "_xmit_PIMREG", "_xmit_HIPPI", "_xmit_ASH", "_xmit_ECONET",
289 "_xmit_IRDA", "_xmit_FCPP", "_xmit_FCAL", "_xmit_FCPL",
290 "_xmit_FCFABRIC", "_xmit_IEEE802_TR", "_xmit_IEEE80211",
291 "_xmit_IEEE80211_PRISM", "_xmit_IEEE80211_RADIOTAP", "_xmit_PHONET",
292 "_xmit_PHONET_PIPE", "_xmit_IEEE802154",
293 "_xmit_VOID", "_xmit_NONE"};
295 static struct lock_class_key netdev_xmit_lock_key[ARRAY_SIZE(netdev_lock_type)];
296 static struct lock_class_key netdev_addr_lock_key[ARRAY_SIZE(netdev_lock_type)];
298 static inline unsigned short netdev_lock_pos(unsigned short dev_type)
302 for (i = 0; i < ARRAY_SIZE(netdev_lock_type); i++)
303 if (netdev_lock_type[i] == dev_type)
305 /* the last key is used by default */
306 return ARRAY_SIZE(netdev_lock_type) - 1;
309 static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
310 unsigned short dev_type)
314 i = netdev_lock_pos(dev_type);
315 lockdep_set_class_and_name(lock, &netdev_xmit_lock_key[i],
316 netdev_lock_name[i]);
319 static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
323 i = netdev_lock_pos(dev->type);
324 lockdep_set_class_and_name(&dev->addr_list_lock,
325 &netdev_addr_lock_key[i],
326 netdev_lock_name[i]);
329 static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
330 unsigned short dev_type)
333 static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
338 /*******************************************************************************
340 Protocol management and registration routines
342 *******************************************************************************/
345 * Add a protocol ID to the list. Now that the input handler is
346 * smarter we can dispense with all the messy stuff that used to be
349 * BEWARE!!! Protocol handlers, mangling input packets,
350 * MUST BE last in hash buckets and checking protocol handlers
351 * MUST start from promiscuous ptype_all chain in net_bh.
352 * It is true now, do not change it.
353 * Explanation follows: if protocol handler, mangling packet, will
354 * be the first on list, it is not able to sense, that packet
355 * is cloned and should be copied-on-write, so that it will
356 * change it and subsequent readers will get broken packet.
361 * dev_add_pack - add packet handler
362 * @pt: packet type declaration
364 * Add a protocol handler to the networking stack. The passed &packet_type
365 * is linked into kernel lists and may not be freed until it has been
366 * removed from the kernel lists.
368 * This call does not sleep therefore it can not
369 * guarantee all CPU's that are in middle of receiving packets
370 * will see the new packet type (until the next received packet).
373 void dev_add_pack(struct packet_type *pt)
377 spin_lock_bh(&ptype_lock);
378 if (pt->type == htons(ETH_P_ALL))
379 list_add_rcu(&pt->list, &ptype_all);
381 hash = ntohs(pt->type) & PTYPE_HASH_MASK;
382 list_add_rcu(&pt->list, &ptype_base[hash]);
384 spin_unlock_bh(&ptype_lock);
386 EXPORT_SYMBOL(dev_add_pack);
389 * __dev_remove_pack - remove packet handler
390 * @pt: packet type declaration
392 * Remove a protocol handler that was previously added to the kernel
393 * protocol handlers by dev_add_pack(). The passed &packet_type is removed
394 * from the kernel lists and can be freed or reused once this function
397 * The packet type might still be in use by receivers
398 * and must not be freed until after all the CPU's have gone
399 * through a quiescent state.
401 void __dev_remove_pack(struct packet_type *pt)
403 struct list_head *head;
404 struct packet_type *pt1;
406 spin_lock_bh(&ptype_lock);
408 if (pt->type == htons(ETH_P_ALL))
411 head = &ptype_base[ntohs(pt->type) & PTYPE_HASH_MASK];
413 list_for_each_entry(pt1, head, list) {
415 list_del_rcu(&pt->list);
420 printk(KERN_WARNING "dev_remove_pack: %p not found.\n", pt);
422 spin_unlock_bh(&ptype_lock);
424 EXPORT_SYMBOL(__dev_remove_pack);
427 * dev_remove_pack - remove packet handler
428 * @pt: packet type declaration
430 * Remove a protocol handler that was previously added to the kernel
431 * protocol handlers by dev_add_pack(). The passed &packet_type is removed
432 * from the kernel lists and can be freed or reused once this function
435 * This call sleeps to guarantee that no CPU is looking at the packet
438 void dev_remove_pack(struct packet_type *pt)
440 __dev_remove_pack(pt);
444 EXPORT_SYMBOL(dev_remove_pack);
446 /******************************************************************************
448 Device Boot-time Settings Routines
450 *******************************************************************************/
452 /* Boot time configuration table */
453 static struct netdev_boot_setup dev_boot_setup[NETDEV_BOOT_SETUP_MAX];
456 * netdev_boot_setup_add - add new setup entry
457 * @name: name of the device
458 * @map: configured settings for the device
460 * Adds new setup entry to the dev_boot_setup list. The function
461 * returns 0 on error and 1 on success. This is a generic routine to
464 static int netdev_boot_setup_add(char *name, struct ifmap *map)
466 struct netdev_boot_setup *s;
470 for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
471 if (s[i].name[0] == '\0' || s[i].name[0] == ' ') {
472 memset(s[i].name, 0, sizeof(s[i].name));
473 strlcpy(s[i].name, name, IFNAMSIZ);
474 memcpy(&s[i].map, map, sizeof(s[i].map));
479 return i >= NETDEV_BOOT_SETUP_MAX ? 0 : 1;
483 * netdev_boot_setup_check - check boot time settings
484 * @dev: the netdevice
486 * Check boot time settings for the device.
487 * The found settings are set for the device to be used
488 * later in the device probing.
489 * Returns 0 if no settings found, 1 if they are.
491 int netdev_boot_setup_check(struct net_device *dev)
493 struct netdev_boot_setup *s = dev_boot_setup;
496 for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
497 if (s[i].name[0] != '\0' && s[i].name[0] != ' ' &&
498 !strcmp(dev->name, s[i].name)) {
499 dev->irq = s[i].map.irq;
500 dev->base_addr = s[i].map.base_addr;
501 dev->mem_start = s[i].map.mem_start;
502 dev->mem_end = s[i].map.mem_end;
508 EXPORT_SYMBOL(netdev_boot_setup_check);
512 * netdev_boot_base - get address from boot time settings
513 * @prefix: prefix for network device
514 * @unit: id for network device
516 * Check boot time settings for the base address of device.
517 * The found settings are set for the device to be used
518 * later in the device probing.
519 * Returns 0 if no settings found.
521 unsigned long netdev_boot_base(const char *prefix, int unit)
523 const struct netdev_boot_setup *s = dev_boot_setup;
527 sprintf(name, "%s%d", prefix, unit);
530 * If device already registered then return base of 1
531 * to indicate not to probe for this interface
533 if (__dev_get_by_name(&init_net, name))
536 for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++)
537 if (!strcmp(name, s[i].name))
538 return s[i].map.base_addr;
543 * Saves at boot time configured settings for any netdevice.
545 int __init netdev_boot_setup(char *str)
550 str = get_options(str, ARRAY_SIZE(ints), ints);
555 memset(&map, 0, sizeof(map));
559 map.base_addr = ints[2];
561 map.mem_start = ints[3];
563 map.mem_end = ints[4];
565 /* Add new entry to the list */
566 return netdev_boot_setup_add(str, &map);
569 __setup("netdev=", netdev_boot_setup);
571 /*******************************************************************************
573 Device Interface Subroutines
575 *******************************************************************************/
578 * __dev_get_by_name - find a device by its name
579 * @net: the applicable net namespace
580 * @name: name to find
582 * Find an interface by name. Must be called under RTNL semaphore
583 * or @dev_base_lock. If the name is found a pointer to the device
584 * is returned. If the name is not found then %NULL is returned. The
585 * reference counters are not incremented so the caller must be
586 * careful with locks.
589 struct net_device *__dev_get_by_name(struct net *net, const char *name)
591 struct hlist_node *p;
592 struct net_device *dev;
593 struct hlist_head *head = dev_name_hash(net, name);
595 hlist_for_each_entry(dev, p, head, name_hlist)
596 if (!strncmp(dev->name, name, IFNAMSIZ))
601 EXPORT_SYMBOL(__dev_get_by_name);
604 * dev_get_by_name_rcu - find a device by its name
605 * @net: the applicable net namespace
606 * @name: name to find
608 * Find an interface by name.
609 * If the name is found a pointer to the device is returned.
610 * If the name is not found then %NULL is returned.
611 * The reference counters are not incremented so the caller must be
612 * careful with locks. The caller must hold RCU lock.
615 struct net_device *dev_get_by_name_rcu(struct net *net, const char *name)
617 struct hlist_node *p;
618 struct net_device *dev;
619 struct hlist_head *head = dev_name_hash(net, name);
621 hlist_for_each_entry_rcu(dev, p, head, name_hlist)
622 if (!strncmp(dev->name, name, IFNAMSIZ))
627 EXPORT_SYMBOL(dev_get_by_name_rcu);
630 * dev_get_by_name - find a device by its name
631 * @net: the applicable net namespace
632 * @name: name to find
634 * Find an interface by name. This can be called from any
635 * context and does its own locking. The returned handle has
636 * the usage count incremented and the caller must use dev_put() to
637 * release it when it is no longer needed. %NULL is returned if no
638 * matching device is found.
641 struct net_device *dev_get_by_name(struct net *net, const char *name)
643 struct net_device *dev;
646 dev = dev_get_by_name_rcu(net, name);
652 EXPORT_SYMBOL(dev_get_by_name);
655 * __dev_get_by_index - find a device by its ifindex
656 * @net: the applicable net namespace
657 * @ifindex: index of device
659 * Search for an interface by index. Returns %NULL if the device
660 * is not found or a pointer to the device. The device has not
661 * had its reference counter increased so the caller must be careful
662 * about locking. The caller must hold either the RTNL semaphore
666 struct net_device *__dev_get_by_index(struct net *net, int ifindex)
668 struct hlist_node *p;
669 struct net_device *dev;
670 struct hlist_head *head = dev_index_hash(net, ifindex);
672 hlist_for_each_entry(dev, p, head, index_hlist)
673 if (dev->ifindex == ifindex)
678 EXPORT_SYMBOL(__dev_get_by_index);
681 * dev_get_by_index_rcu - find a device by its ifindex
682 * @net: the applicable net namespace
683 * @ifindex: index of device
685 * Search for an interface by index. Returns %NULL if the device
686 * is not found or a pointer to the device. The device has not
687 * had its reference counter increased so the caller must be careful
688 * about locking. The caller must hold RCU lock.
691 struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex)
693 struct hlist_node *p;
694 struct net_device *dev;
695 struct hlist_head *head = dev_index_hash(net, ifindex);
697 hlist_for_each_entry_rcu(dev, p, head, index_hlist)
698 if (dev->ifindex == ifindex)
703 EXPORT_SYMBOL(dev_get_by_index_rcu);
707 * dev_get_by_index - find a device by its ifindex
708 * @net: the applicable net namespace
709 * @ifindex: index of device
711 * Search for an interface by index. Returns NULL if the device
712 * is not found or a pointer to the device. The device returned has
713 * had a reference added and the pointer is safe until the user calls
714 * dev_put to indicate they have finished with it.
717 struct net_device *dev_get_by_index(struct net *net, int ifindex)
719 struct net_device *dev;
722 dev = dev_get_by_index_rcu(net, ifindex);
728 EXPORT_SYMBOL(dev_get_by_index);
731 * dev_getbyhwaddr - find a device by its hardware address
732 * @net: the applicable net namespace
733 * @type: media type of device
734 * @ha: hardware address
736 * Search for an interface by MAC address. Returns NULL if the device
737 * is not found or a pointer to the device. The caller must hold the
738 * rtnl semaphore. The returned device has not had its ref count increased
739 * and the caller must therefore be careful about locking
742 * If the API was consistent this would be __dev_get_by_hwaddr
745 struct net_device *dev_getbyhwaddr(struct net *net, unsigned short type, char *ha)
747 struct net_device *dev;
751 for_each_netdev(net, dev)
752 if (dev->type == type &&
753 !memcmp(dev->dev_addr, ha, dev->addr_len))
758 EXPORT_SYMBOL(dev_getbyhwaddr);
760 struct net_device *__dev_getfirstbyhwtype(struct net *net, unsigned short type)
762 struct net_device *dev;
765 for_each_netdev(net, dev)
766 if (dev->type == type)
771 EXPORT_SYMBOL(__dev_getfirstbyhwtype);
773 struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type)
775 struct net_device *dev;
778 dev = __dev_getfirstbyhwtype(net, type);
784 EXPORT_SYMBOL(dev_getfirstbyhwtype);
787 * dev_get_by_flags - find any device with given flags
788 * @net: the applicable net namespace
789 * @if_flags: IFF_* values
790 * @mask: bitmask of bits in if_flags to check
792 * Search for any interface with the given flags. Returns NULL if a device
793 * is not found or a pointer to the device. The device returned has
794 * had a reference added and the pointer is safe until the user calls
795 * dev_put to indicate they have finished with it.
798 struct net_device *dev_get_by_flags(struct net *net, unsigned short if_flags,
801 struct net_device *dev, *ret;
805 for_each_netdev_rcu(net, dev) {
806 if (((dev->flags ^ if_flags) & mask) == 0) {
815 EXPORT_SYMBOL(dev_get_by_flags);
818 * dev_valid_name - check if name is okay for network device
821 * Network device names need to be valid file names to
822 * to allow sysfs to work. We also disallow any kind of
825 int dev_valid_name(const char *name)
829 if (strlen(name) >= IFNAMSIZ)
831 if (!strcmp(name, ".") || !strcmp(name, ".."))
835 if (*name == '/' || isspace(*name))
841 EXPORT_SYMBOL(dev_valid_name);
844 * __dev_alloc_name - allocate a name for a device
845 * @net: network namespace to allocate the device name in
846 * @name: name format string
847 * @buf: scratch buffer and result name string
849 * Passed a format string - eg "lt%d" it will try and find a suitable
850 * id. It scans list of devices to build up a free map, then chooses
851 * the first empty slot. The caller must hold the dev_base or rtnl lock
852 * while allocating the name and adding the device in order to avoid
854 * Limited to bits_per_byte * page size devices (ie 32K on most platforms).
855 * Returns the number of the unit assigned or a negative errno code.
858 static int __dev_alloc_name(struct net *net, const char *name, char *buf)
862 const int max_netdevices = 8*PAGE_SIZE;
863 unsigned long *inuse;
864 struct net_device *d;
866 p = strnchr(name, IFNAMSIZ-1, '%');
869 * Verify the string as this thing may have come from
870 * the user. There must be either one "%d" and no other "%"
873 if (p[1] != 'd' || strchr(p + 2, '%'))
876 /* Use one page as a bit array of possible slots */
877 inuse = (unsigned long *) get_zeroed_page(GFP_ATOMIC);
881 for_each_netdev(net, d) {
882 if (!sscanf(d->name, name, &i))
884 if (i < 0 || i >= max_netdevices)
887 /* avoid cases where sscanf is not exact inverse of printf */
888 snprintf(buf, IFNAMSIZ, name, i);
889 if (!strncmp(buf, d->name, IFNAMSIZ))
893 i = find_first_zero_bit(inuse, max_netdevices);
894 free_page((unsigned long) inuse);
898 snprintf(buf, IFNAMSIZ, name, i);
899 if (!__dev_get_by_name(net, buf))
902 /* It is possible to run out of possible slots
903 * when the name is long and there isn't enough space left
904 * for the digits, or if all bits are used.
910 * dev_alloc_name - allocate a name for a device
912 * @name: name format string
914 * Passed a format string - eg "lt%d" it will try and find a suitable
915 * id. It scans list of devices to build up a free map, then chooses
916 * the first empty slot. The caller must hold the dev_base or rtnl lock
917 * while allocating the name and adding the device in order to avoid
919 * Limited to bits_per_byte * page size devices (ie 32K on most platforms).
920 * Returns the number of the unit assigned or a negative errno code.
923 int dev_alloc_name(struct net_device *dev, const char *name)
929 BUG_ON(!dev_net(dev));
931 ret = __dev_alloc_name(net, name, buf);
933 strlcpy(dev->name, buf, IFNAMSIZ);
936 EXPORT_SYMBOL(dev_alloc_name);
938 static int dev_get_valid_name(struct net *net, const char *name, char *buf,
941 if (!dev_valid_name(name))
944 if (fmt && strchr(name, '%'))
945 return __dev_alloc_name(net, name, buf);
946 else if (__dev_get_by_name(net, name))
948 else if (buf != name)
949 strlcpy(buf, name, IFNAMSIZ);
955 * dev_change_name - change name of a device
957 * @newname: name (or format string) must be at least IFNAMSIZ
959 * Change name of a device, can pass format strings "eth%d".
962 int dev_change_name(struct net_device *dev, const char *newname)
964 char oldname[IFNAMSIZ];
970 BUG_ON(!dev_net(dev));
973 if (dev->flags & IFF_UP)
976 if (strncmp(newname, dev->name, IFNAMSIZ) == 0)
979 memcpy(oldname, dev->name, IFNAMSIZ);
981 err = dev_get_valid_name(net, newname, dev->name, 1);
986 /* For now only devices in the initial network namespace
989 if (net_eq(net, &init_net)) {
990 ret = device_rename(&dev->dev, dev->name);
992 memcpy(dev->name, oldname, IFNAMSIZ);
997 write_lock_bh(&dev_base_lock);
998 hlist_del(&dev->name_hlist);
999 write_unlock_bh(&dev_base_lock);
1003 write_lock_bh(&dev_base_lock);
1004 hlist_add_head_rcu(&dev->name_hlist, dev_name_hash(net, dev->name));
1005 write_unlock_bh(&dev_base_lock);
1007 ret = call_netdevice_notifiers(NETDEV_CHANGENAME, dev);
1008 ret = notifier_to_errno(ret);
1011 /* err >= 0 after dev_alloc_name() or stores the first errno */
1014 memcpy(dev->name, oldname, IFNAMSIZ);
1018 "%s: name change rollback failed: %d.\n",
1027 * dev_set_alias - change ifalias of a device
1029 * @alias: name up to IFALIASZ
1030 * @len: limit of bytes to copy from info
1032 * Set ifalias for a device,
1034 int dev_set_alias(struct net_device *dev, const char *alias, size_t len)
1038 if (len >= IFALIASZ)
1043 kfree(dev->ifalias);
1044 dev->ifalias = NULL;
1049 dev->ifalias = krealloc(dev->ifalias, len + 1, GFP_KERNEL);
1053 strlcpy(dev->ifalias, alias, len+1);
1059 * netdev_features_change - device changes features
1060 * @dev: device to cause notification
1062 * Called to indicate a device has changed features.
1064 void netdev_features_change(struct net_device *dev)
1066 call_netdevice_notifiers(NETDEV_FEAT_CHANGE, dev);
1068 EXPORT_SYMBOL(netdev_features_change);
1071 * netdev_state_change - device changes state
1072 * @dev: device to cause notification
1074 * Called to indicate a device has changed state. This function calls
1075 * the notifier chains for netdev_chain and sends a NEWLINK message
1076 * to the routing socket.
1078 void netdev_state_change(struct net_device *dev)
1080 if (dev->flags & IFF_UP) {
1081 call_netdevice_notifiers(NETDEV_CHANGE, dev);
1082 rtmsg_ifinfo(RTM_NEWLINK, dev, 0);
1085 EXPORT_SYMBOL(netdev_state_change);
1087 void netdev_bonding_change(struct net_device *dev, unsigned long event)
1089 call_netdevice_notifiers(event, dev);
1091 EXPORT_SYMBOL(netdev_bonding_change);
1094 * dev_load - load a network module
1095 * @net: the applicable net namespace
1096 * @name: name of interface
1098 * If a network interface is not present and the process has suitable
1099 * privileges this function loads the module. If module loading is not
1100 * available in this kernel then it becomes a nop.
1103 void dev_load(struct net *net, const char *name)
1105 struct net_device *dev;
1108 dev = dev_get_by_name_rcu(net, name);
1111 if (!dev && capable(CAP_NET_ADMIN))
1112 request_module("%s", name);
1114 EXPORT_SYMBOL(dev_load);
1116 static int __dev_open(struct net_device *dev)
1118 const struct net_device_ops *ops = dev->netdev_ops;
1124 * Is it even present?
1126 if (!netif_device_present(dev))
1129 ret = call_netdevice_notifiers(NETDEV_PRE_UP, dev);
1130 ret = notifier_to_errno(ret);
1135 * Call device private open method
1137 set_bit(__LINK_STATE_START, &dev->state);
1139 if (ops->ndo_validate_addr)
1140 ret = ops->ndo_validate_addr(dev);
1142 if (!ret && ops->ndo_open)
1143 ret = ops->ndo_open(dev);
1146 * If it went open OK then:
1150 clear_bit(__LINK_STATE_START, &dev->state);
1155 dev->flags |= IFF_UP;
1160 net_dmaengine_get();
1163 * Initialize multicasting status
1165 dev_set_rx_mode(dev);
1168 * Wakeup transmit queue engine
1177 * dev_open - prepare an interface for use.
1178 * @dev: device to open
1180 * Takes a device from down to up state. The device's private open
1181 * function is invoked and then the multicast lists are loaded. Finally
1182 * the device is moved into the up state and a %NETDEV_UP message is
1183 * sent to the netdev notifier chain.
1185 * Calling this function on an active interface is a nop. On a failure
1186 * a negative errno code is returned.
1188 int dev_open(struct net_device *dev)
1195 if (dev->flags & IFF_UP)
1201 ret = __dev_open(dev);
1206 * ... and announce new interface.
1208 rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP|IFF_RUNNING);
1209 call_netdevice_notifiers(NETDEV_UP, dev);
1213 EXPORT_SYMBOL(dev_open);
1215 static int __dev_close(struct net_device *dev)
1217 const struct net_device_ops *ops = dev->netdev_ops;
1223 * Tell people we are going down, so that they can
1224 * prepare to death, when device is still operating.
1226 call_netdevice_notifiers(NETDEV_GOING_DOWN, dev);
1228 clear_bit(__LINK_STATE_START, &dev->state);
1230 /* Synchronize to scheduled poll. We cannot touch poll list,
1231 * it can be even on different cpu. So just clear netif_running().
1233 * dev->stop() will invoke napi_disable() on all of it's
1234 * napi_struct instances on this device.
1236 smp_mb__after_clear_bit(); /* Commit netif_running(). */
1238 dev_deactivate(dev);
1241 * Call the device specific close. This cannot fail.
1242 * Only if device is UP
1244 * We allow it to be called even after a DETACH hot-plug
1251 * Device is now down.
1254 dev->flags &= ~IFF_UP;
1259 net_dmaengine_put();
1265 * dev_close - shutdown an interface.
1266 * @dev: device to shutdown
1268 * This function moves an active device into down state. A
1269 * %NETDEV_GOING_DOWN is sent to the netdev notifier chain. The device
1270 * is then deactivated and finally a %NETDEV_DOWN is sent to the notifier
1273 int dev_close(struct net_device *dev)
1275 if (!(dev->flags & IFF_UP))
1281 * Tell people we are down
1283 rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP|IFF_RUNNING);
1284 call_netdevice_notifiers(NETDEV_DOWN, dev);
1288 EXPORT_SYMBOL(dev_close);
1292 * dev_disable_lro - disable Large Receive Offload on a device
1295 * Disable Large Receive Offload (LRO) on a net device. Must be
1296 * called under RTNL. This is needed if received packets may be
1297 * forwarded to another interface.
1299 void dev_disable_lro(struct net_device *dev)
1301 if (dev->ethtool_ops && dev->ethtool_ops->get_flags &&
1302 dev->ethtool_ops->set_flags) {
1303 u32 flags = dev->ethtool_ops->get_flags(dev);
1304 if (flags & ETH_FLAG_LRO) {
1305 flags &= ~ETH_FLAG_LRO;
1306 dev->ethtool_ops->set_flags(dev, flags);
1309 WARN_ON(dev->features & NETIF_F_LRO);
1311 EXPORT_SYMBOL(dev_disable_lro);
1314 static int dev_boot_phase = 1;
1317 * Device change register/unregister. These are not inline or static
1318 * as we export them to the world.
1322 * register_netdevice_notifier - register a network notifier block
1325 * Register a notifier to be called when network device events occur.
1326 * The notifier passed is linked into the kernel structures and must
1327 * not be reused until it has been unregistered. A negative errno code
1328 * is returned on a failure.
1330 * When registered all registration and up events are replayed
1331 * to the new notifier to allow device to have a race free
1332 * view of the network device list.
1335 int register_netdevice_notifier(struct notifier_block *nb)
1337 struct net_device *dev;
1338 struct net_device *last;
1343 err = raw_notifier_chain_register(&netdev_chain, nb);
1349 for_each_netdev(net, dev) {
1350 err = nb->notifier_call(nb, NETDEV_REGISTER, dev);
1351 err = notifier_to_errno(err);
1355 if (!(dev->flags & IFF_UP))
1358 nb->notifier_call(nb, NETDEV_UP, dev);
1369 for_each_netdev(net, dev) {
1373 if (dev->flags & IFF_UP) {
1374 nb->notifier_call(nb, NETDEV_GOING_DOWN, dev);
1375 nb->notifier_call(nb, NETDEV_DOWN, dev);
1377 nb->notifier_call(nb, NETDEV_UNREGISTER, dev);
1378 nb->notifier_call(nb, NETDEV_UNREGISTER_BATCH, dev);
1382 raw_notifier_chain_unregister(&netdev_chain, nb);
1385 EXPORT_SYMBOL(register_netdevice_notifier);
1388 * unregister_netdevice_notifier - unregister a network notifier block
1391 * Unregister a notifier previously registered by
1392 * register_netdevice_notifier(). The notifier is unlinked into the
1393 * kernel structures and may then be reused. A negative errno code
1394 * is returned on a failure.
1397 int unregister_netdevice_notifier(struct notifier_block *nb)
1402 err = raw_notifier_chain_unregister(&netdev_chain, nb);
1406 EXPORT_SYMBOL(unregister_netdevice_notifier);
1409 * call_netdevice_notifiers - call all network notifier blocks
1410 * @val: value passed unmodified to notifier function
1411 * @dev: net_device pointer passed unmodified to notifier function
1413 * Call all network notifier blocks. Parameters and return value
1414 * are as for raw_notifier_call_chain().
1417 int call_netdevice_notifiers(unsigned long val, struct net_device *dev)
1419 return raw_notifier_call_chain(&netdev_chain, val, dev);
1422 /* When > 0 there are consumers of rx skb time stamps */
1423 static atomic_t netstamp_needed = ATOMIC_INIT(0);
1425 void net_enable_timestamp(void)
1427 atomic_inc(&netstamp_needed);
1429 EXPORT_SYMBOL(net_enable_timestamp);
1431 void net_disable_timestamp(void)
1433 atomic_dec(&netstamp_needed);
1435 EXPORT_SYMBOL(net_disable_timestamp);
1437 static inline void net_timestamp(struct sk_buff *skb)
1439 if (atomic_read(&netstamp_needed))
1440 __net_timestamp(skb);
1442 skb->tstamp.tv64 = 0;
1446 * dev_forward_skb - loopback an skb to another netif
1448 * @dev: destination network device
1449 * @skb: buffer to forward
1452 * NET_RX_SUCCESS (no congestion)
1453 * NET_RX_DROP (packet was dropped)
1455 * dev_forward_skb can be used for injecting an skb from the
1456 * start_xmit function of one device into the receive queue
1457 * of another device.
1459 * The receiving device may be in another namespace, so
1460 * we have to clear all information in the skb that could
1461 * impact namespace isolation.
1463 int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
1467 if (!(dev->flags & IFF_UP))
1470 if (skb->len > (dev->mtu + dev->hard_header_len))
1473 skb_set_dev(skb, dev);
1474 skb->tstamp.tv64 = 0;
1475 skb->pkt_type = PACKET_HOST;
1476 skb->protocol = eth_type_trans(skb, dev);
1477 return netif_rx(skb);
1479 EXPORT_SYMBOL_GPL(dev_forward_skb);
1482 * Support routine. Sends outgoing frames to any network
1483 * taps currently in use.
1486 static void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev)
1488 struct packet_type *ptype;
1490 #ifdef CONFIG_NET_CLS_ACT
1491 if (!(skb->tstamp.tv64 && (G_TC_FROM(skb->tc_verd) & AT_INGRESS)))
1498 list_for_each_entry_rcu(ptype, &ptype_all, list) {
1499 /* Never send packets back to the socket
1500 * they originated from - MvS (miquels@drinkel.ow.org)
1502 if ((ptype->dev == dev || !ptype->dev) &&
1503 (ptype->af_packet_priv == NULL ||
1504 (struct sock *)ptype->af_packet_priv != skb->sk)) {
1505 struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
1509 /* skb->nh should be correctly
1510 set by sender, so that the second statement is
1511 just protection against buggy protocols.
1513 skb_reset_mac_header(skb2);
1515 if (skb_network_header(skb2) < skb2->data ||
1516 skb2->network_header > skb2->tail) {
1517 if (net_ratelimit())
1518 printk(KERN_CRIT "protocol %04x is "
1520 skb2->protocol, dev->name);
1521 skb_reset_network_header(skb2);
1524 skb2->transport_header = skb2->network_header;
1525 skb2->pkt_type = PACKET_OUTGOING;
1526 ptype->func(skb2, skb->dev, ptype, skb->dev);
1533 static inline void __netif_reschedule(struct Qdisc *q)
1535 struct softnet_data *sd;
1536 unsigned long flags;
1538 local_irq_save(flags);
1539 sd = &__get_cpu_var(softnet_data);
1540 q->next_sched = sd->output_queue;
1541 sd->output_queue = q;
1542 raise_softirq_irqoff(NET_TX_SOFTIRQ);
1543 local_irq_restore(flags);
1546 void __netif_schedule(struct Qdisc *q)
1548 if (!test_and_set_bit(__QDISC_STATE_SCHED, &q->state))
1549 __netif_reschedule(q);
1551 EXPORT_SYMBOL(__netif_schedule);
1553 void dev_kfree_skb_irq(struct sk_buff *skb)
1555 if (atomic_dec_and_test(&skb->users)) {
1556 struct softnet_data *sd;
1557 unsigned long flags;
1559 local_irq_save(flags);
1560 sd = &__get_cpu_var(softnet_data);
1561 skb->next = sd->completion_queue;
1562 sd->completion_queue = skb;
1563 raise_softirq_irqoff(NET_TX_SOFTIRQ);
1564 local_irq_restore(flags);
1567 EXPORT_SYMBOL(dev_kfree_skb_irq);
1569 void dev_kfree_skb_any(struct sk_buff *skb)
1571 if (in_irq() || irqs_disabled())
1572 dev_kfree_skb_irq(skb);
1576 EXPORT_SYMBOL(dev_kfree_skb_any);
1580 * netif_device_detach - mark device as removed
1581 * @dev: network device
1583 * Mark device as removed from system and therefore no longer available.
1585 void netif_device_detach(struct net_device *dev)
1587 if (test_and_clear_bit(__LINK_STATE_PRESENT, &dev->state) &&
1588 netif_running(dev)) {
1589 netif_tx_stop_all_queues(dev);
1592 EXPORT_SYMBOL(netif_device_detach);
1595 * netif_device_attach - mark device as attached
1596 * @dev: network device
1598 * Mark device as attached from system and restart if needed.
1600 void netif_device_attach(struct net_device *dev)
1602 if (!test_and_set_bit(__LINK_STATE_PRESENT, &dev->state) &&
1603 netif_running(dev)) {
1604 netif_tx_wake_all_queues(dev);
1605 __netdev_watchdog_up(dev);
1608 EXPORT_SYMBOL(netif_device_attach);
1610 static bool can_checksum_protocol(unsigned long features, __be16 protocol)
1612 return ((features & NETIF_F_GEN_CSUM) ||
1613 ((features & NETIF_F_IP_CSUM) &&
1614 protocol == htons(ETH_P_IP)) ||
1615 ((features & NETIF_F_IPV6_CSUM) &&
1616 protocol == htons(ETH_P_IPV6)) ||
1617 ((features & NETIF_F_FCOE_CRC) &&
1618 protocol == htons(ETH_P_FCOE)));
1621 static bool dev_can_checksum(struct net_device *dev, struct sk_buff *skb)
1623 if (can_checksum_protocol(dev->features, skb->protocol))
1626 if (skb->protocol == htons(ETH_P_8021Q)) {
1627 struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
1628 if (can_checksum_protocol(dev->features & dev->vlan_features,
1629 veh->h_vlan_encapsulated_proto))
1637 * skb_dev_set -- assign a new device to a buffer
1638 * @skb: buffer for the new device
1639 * @dev: network device
1641 * If an skb is owned by a device already, we have to reset
1642 * all data private to the namespace a device belongs to
1643 * before assigning it a new device.
1645 #ifdef CONFIG_NET_NS
1646 void skb_set_dev(struct sk_buff *skb, struct net_device *dev)
1649 if (skb->dev && !net_eq(dev_net(skb->dev), dev_net(dev))) {
1652 skb_init_secmark(skb);
1656 skb->ipvs_property = 0;
1657 #ifdef CONFIG_NET_SCHED
1663 EXPORT_SYMBOL(skb_set_dev);
1664 #endif /* CONFIG_NET_NS */
1667 * Invalidate hardware checksum when packet is to be mangled, and
1668 * complete checksum manually on outgoing path.
1670 int skb_checksum_help(struct sk_buff *skb)
1673 int ret = 0, offset;
1675 if (skb->ip_summed == CHECKSUM_COMPLETE)
1676 goto out_set_summed;
1678 if (unlikely(skb_shinfo(skb)->gso_size)) {
1679 /* Let GSO fix up the checksum. */
1680 goto out_set_summed;
1683 offset = skb->csum_start - skb_headroom(skb);
1684 BUG_ON(offset >= skb_headlen(skb));
1685 csum = skb_checksum(skb, offset, skb->len - offset, 0);
1687 offset += skb->csum_offset;
1688 BUG_ON(offset + sizeof(__sum16) > skb_headlen(skb));
1690 if (skb_cloned(skb) &&
1691 !skb_clone_writable(skb, offset + sizeof(__sum16))) {
1692 ret = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
1697 *(__sum16 *)(skb->data + offset) = csum_fold(csum);
1699 skb->ip_summed = CHECKSUM_NONE;
1703 EXPORT_SYMBOL(skb_checksum_help);
1706 * skb_gso_segment - Perform segmentation on skb.
1707 * @skb: buffer to segment
1708 * @features: features for the output path (see dev->features)
1710 * This function segments the given skb and returns a list of segments.
1712 * It may return NULL if the skb requires no segmentation. This is
1713 * only possible when GSO is used for verifying header integrity.
1715 struct sk_buff *skb_gso_segment(struct sk_buff *skb, int features)
1717 struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT);
1718 struct packet_type *ptype;
1719 __be16 type = skb->protocol;
1722 skb_reset_mac_header(skb);
1723 skb->mac_len = skb->network_header - skb->mac_header;
1724 __skb_pull(skb, skb->mac_len);
1726 if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL)) {
1727 struct net_device *dev = skb->dev;
1728 struct ethtool_drvinfo info = {};
1730 if (dev && dev->ethtool_ops && dev->ethtool_ops->get_drvinfo)
1731 dev->ethtool_ops->get_drvinfo(dev, &info);
1733 WARN(1, "%s: caps=(0x%lx, 0x%lx) len=%d data_len=%d "
1735 info.driver, dev ? dev->features : 0L,
1736 skb->sk ? skb->sk->sk_route_caps : 0L,
1737 skb->len, skb->data_len, skb->ip_summed);
1739 if (skb_header_cloned(skb) &&
1740 (err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC)))
1741 return ERR_PTR(err);
1745 list_for_each_entry_rcu(ptype,
1746 &ptype_base[ntohs(type) & PTYPE_HASH_MASK], list) {
1747 if (ptype->type == type && !ptype->dev && ptype->gso_segment) {
1748 if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL)) {
1749 err = ptype->gso_send_check(skb);
1750 segs = ERR_PTR(err);
1751 if (err || skb_gso_ok(skb, features))
1753 __skb_push(skb, (skb->data -
1754 skb_network_header(skb)));
1756 segs = ptype->gso_segment(skb, features);
1762 __skb_push(skb, skb->data - skb_mac_header(skb));
1766 EXPORT_SYMBOL(skb_gso_segment);
1768 /* Take action when hardware reception checksum errors are detected. */
1770 void netdev_rx_csum_fault(struct net_device *dev)
1772 if (net_ratelimit()) {
1773 printk(KERN_ERR "%s: hw csum failure.\n",
1774 dev ? dev->name : "<unknown>");
1778 EXPORT_SYMBOL(netdev_rx_csum_fault);
1781 /* Actually, we should eliminate this check as soon as we know, that:
1782 * 1. IOMMU is present and allows to map all the memory.
1783 * 2. No high memory really exists on this machine.
1786 static inline int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
1788 #ifdef CONFIG_HIGHMEM
1791 if (dev->features & NETIF_F_HIGHDMA)
1794 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
1795 if (PageHighMem(skb_shinfo(skb)->frags[i].page))
1803 void (*destructor)(struct sk_buff *skb);
1806 #define DEV_GSO_CB(skb) ((struct dev_gso_cb *)(skb)->cb)
1808 static void dev_gso_skb_destructor(struct sk_buff *skb)
1810 struct dev_gso_cb *cb;
1813 struct sk_buff *nskb = skb->next;
1815 skb->next = nskb->next;
1818 } while (skb->next);
1820 cb = DEV_GSO_CB(skb);
1822 cb->destructor(skb);
1826 * dev_gso_segment - Perform emulated hardware segmentation on skb.
1827 * @skb: buffer to segment
1829 * This function segments the given skb and stores the list of segments
1832 static int dev_gso_segment(struct sk_buff *skb)
1834 struct net_device *dev = skb->dev;
1835 struct sk_buff *segs;
1836 int features = dev->features & ~(illegal_highdma(dev, skb) ?
1839 segs = skb_gso_segment(skb, features);
1841 /* Verifying header integrity only. */
1846 return PTR_ERR(segs);
1849 DEV_GSO_CB(skb)->destructor = skb->destructor;
1850 skb->destructor = dev_gso_skb_destructor;
1855 int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
1856 struct netdev_queue *txq)
1858 const struct net_device_ops *ops = dev->netdev_ops;
1859 int rc = NETDEV_TX_OK;
1861 if (likely(!skb->next)) {
1862 if (!list_empty(&ptype_all))
1863 dev_queue_xmit_nit(skb, dev);
1865 if (netif_needs_gso(dev, skb)) {
1866 if (unlikely(dev_gso_segment(skb)))
1873 * If device doesnt need skb->dst, release it right now while
1874 * its hot in this cpu cache
1876 if (dev->priv_flags & IFF_XMIT_DST_RELEASE)
1879 rc = ops->ndo_start_xmit(skb, dev);
1880 if (rc == NETDEV_TX_OK)
1881 txq_trans_update(txq);
1883 * TODO: if skb_orphan() was called by
1884 * dev->hard_start_xmit() (for example, the unmodified
1885 * igb driver does that; bnx2 doesn't), then
1886 * skb_tx_software_timestamp() will be unable to send
1887 * back the time stamp.
1889 * How can this be prevented? Always create another
1890 * reference to the socket before calling
1891 * dev->hard_start_xmit()? Prevent that skb_orphan()
1892 * does anything in dev->hard_start_xmit() by clearing
1893 * the skb destructor before the call and restoring it
1894 * afterwards, then doing the skb_orphan() ourselves?
1901 struct sk_buff *nskb = skb->next;
1903 skb->next = nskb->next;
1907 * If device doesnt need nskb->dst, release it right now while
1908 * its hot in this cpu cache
1910 if (dev->priv_flags & IFF_XMIT_DST_RELEASE)
1913 rc = ops->ndo_start_xmit(nskb, dev);
1914 if (unlikely(rc != NETDEV_TX_OK)) {
1915 if (rc & ~NETDEV_TX_MASK)
1916 goto out_kfree_gso_skb;
1917 nskb->next = skb->next;
1921 txq_trans_update(txq);
1922 if (unlikely(netif_tx_queue_stopped(txq) && skb->next))
1923 return NETDEV_TX_BUSY;
1924 } while (skb->next);
1927 if (likely(skb->next == NULL))
1928 skb->destructor = DEV_GSO_CB(skb)->destructor;
1934 static u32 skb_tx_hashrnd;
1936 u16 skb_tx_hash(const struct net_device *dev, const struct sk_buff *skb)
1940 if (skb_rx_queue_recorded(skb)) {
1941 hash = skb_get_rx_queue(skb);
1942 while (unlikely(hash >= dev->real_num_tx_queues))
1943 hash -= dev->real_num_tx_queues;
1947 if (skb->sk && skb->sk->sk_hash)
1948 hash = skb->sk->sk_hash;
1950 hash = skb->protocol;
1952 hash = jhash_1word(hash, skb_tx_hashrnd);
1954 return (u16) (((u64) hash * dev->real_num_tx_queues) >> 32);
1956 EXPORT_SYMBOL(skb_tx_hash);
1958 static inline u16 dev_cap_txqueue(struct net_device *dev, u16 queue_index)
1960 if (unlikely(queue_index >= dev->real_num_tx_queues)) {
1961 if (net_ratelimit()) {
1962 WARN(1, "%s selects TX queue %d, but "
1963 "real number of TX queues is %d\n",
1964 dev->name, queue_index,
1965 dev->real_num_tx_queues);
1972 static struct netdev_queue *dev_pick_tx(struct net_device *dev,
1973 struct sk_buff *skb)
1976 struct sock *sk = skb->sk;
1978 if (sk_tx_queue_recorded(sk)) {
1979 queue_index = sk_tx_queue_get(sk);
1981 const struct net_device_ops *ops = dev->netdev_ops;
1983 if (ops->ndo_select_queue) {
1984 queue_index = ops->ndo_select_queue(dev, skb);
1985 queue_index = dev_cap_txqueue(dev, queue_index);
1988 if (dev->real_num_tx_queues > 1)
1989 queue_index = skb_tx_hash(dev, skb);
1991 if (sk && sk->sk_dst_cache)
1992 sk_tx_queue_set(sk, queue_index);
1996 skb_set_queue_mapping(skb, queue_index);
1997 return netdev_get_tx_queue(dev, queue_index);
2000 static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
2001 struct net_device *dev,
2002 struct netdev_queue *txq)
2004 spinlock_t *root_lock = qdisc_lock(q);
2007 spin_lock(root_lock);
2008 if (unlikely(test_bit(__QDISC_STATE_DEACTIVATED, &q->state))) {
2011 } else if ((q->flags & TCQ_F_CAN_BYPASS) && !qdisc_qlen(q) &&
2012 !test_and_set_bit(__QDISC_STATE_RUNNING, &q->state)) {
2014 * This is a work-conserving queue; there are no old skbs
2015 * waiting to be sent out; and the qdisc is not running -
2016 * xmit the skb directly.
2018 __qdisc_update_bstats(q, skb->len);
2019 if (sch_direct_xmit(skb, q, dev, txq, root_lock))
2022 clear_bit(__QDISC_STATE_RUNNING, &q->state);
2024 rc = NET_XMIT_SUCCESS;
2026 rc = qdisc_enqueue_root(skb, q);
2029 spin_unlock(root_lock);
2035 * Returns true if either:
2036 * 1. skb has frag_list and the device doesn't support FRAGLIST, or
2037 * 2. skb is fragmented and the device does not support SG, or if
2038 * at least one of fragments is in highmem and device does not
2039 * support DMA from it.
2041 static inline int skb_needs_linearize(struct sk_buff *skb,
2042 struct net_device *dev)
2044 return (skb_has_frags(skb) && !(dev->features & NETIF_F_FRAGLIST)) ||
2045 (skb_shinfo(skb)->nr_frags && (!(dev->features & NETIF_F_SG) ||
2046 illegal_highdma(dev, skb)));
2050 * dev_queue_xmit - transmit a buffer
2051 * @skb: buffer to transmit
2053 * Queue a buffer for transmission to a network device. The caller must
2054 * have set the device and priority and built the buffer before calling
2055 * this function. The function can be called from an interrupt.
2057 * A negative errno code is returned on a failure. A success does not
2058 * guarantee the frame will be transmitted as it may be dropped due
2059 * to congestion or traffic shaping.
2061 * -----------------------------------------------------------------------------------
2062 * I notice this method can also return errors from the queue disciplines,
2063 * including NET_XMIT_DROP, which is a positive value. So, errors can also
2066 * Regardless of the return value, the skb is consumed, so it is currently
2067 * difficult to retry a send to this method. (You can bump the ref count
2068 * before sending to hold a reference for retry if you are careful.)
2070 * When calling this method, interrupts MUST be enabled. This is because
2071 * the BH enable code must have IRQs enabled so that it will not deadlock.
2074 int dev_queue_xmit(struct sk_buff *skb)
2076 struct net_device *dev = skb->dev;
2077 struct netdev_queue *txq;
2081 /* GSO will handle the following emulations directly. */
2082 if (netif_needs_gso(dev, skb))
2085 /* Convert a paged skb to linear, if required */
2086 if (skb_needs_linearize(skb, dev) && __skb_linearize(skb))
2089 /* If packet is not checksummed and device does not support
2090 * checksumming for this protocol, complete checksumming here.
2092 if (skb->ip_summed == CHECKSUM_PARTIAL) {
2093 skb_set_transport_header(skb, skb->csum_start -
2095 if (!dev_can_checksum(dev, skb) && skb_checksum_help(skb))
2100 /* Disable soft irqs for various locks below. Also
2101 * stops preemption for RCU.
2105 txq = dev_pick_tx(dev, skb);
2106 q = rcu_dereference_bh(txq->qdisc);
2108 #ifdef CONFIG_NET_CLS_ACT
2109 skb->tc_verd = SET_TC_AT(skb->tc_verd, AT_EGRESS);
2112 rc = __dev_xmit_skb(skb, q, dev, txq);
2116 /* The device has no queue. Common case for software devices:
2117 loopback, all the sorts of tunnels...
2119 Really, it is unlikely that netif_tx_lock protection is necessary
2120 here. (f.e. loopback and IP tunnels are clean ignoring statistics
2122 However, it is possible, that they rely on protection
2125 Check this and shot the lock. It is not prone from deadlocks.
2126 Either shot noqueue qdisc, it is even simpler 8)
2128 if (dev->flags & IFF_UP) {
2129 int cpu = smp_processor_id(); /* ok because BHs are off */
2131 if (txq->xmit_lock_owner != cpu) {
2133 HARD_TX_LOCK(dev, txq, cpu);
2135 if (!netif_tx_queue_stopped(txq)) {
2136 rc = dev_hard_start_xmit(skb, dev, txq);
2137 if (dev_xmit_complete(rc)) {
2138 HARD_TX_UNLOCK(dev, txq);
2142 HARD_TX_UNLOCK(dev, txq);
2143 if (net_ratelimit())
2144 printk(KERN_CRIT "Virtual device %s asks to "
2145 "queue packet!\n", dev->name);
2147 /* Recursion is detected! It is possible,
2149 if (net_ratelimit())
2150 printk(KERN_CRIT "Dead loop on virtual device "
2151 "%s, fix it urgently!\n", dev->name);
2156 rcu_read_unlock_bh();
2162 rcu_read_unlock_bh();
2165 EXPORT_SYMBOL(dev_queue_xmit);
2168 /*=======================================================================
2170 =======================================================================*/
2172 int netdev_max_backlog __read_mostly = 1000;
2173 int netdev_budget __read_mostly = 300;
2174 int weight_p __read_mostly = 64; /* old backlog weight */
2176 DEFINE_PER_CPU(struct netif_rx_stats, netdev_rx_stat) = { 0, };
2180 * netif_rx - post buffer to the network code
2181 * @skb: buffer to post
2183 * This function receives a packet from a device driver and queues it for
2184 * the upper (protocol) levels to process. It always succeeds. The buffer
2185 * may be dropped during processing for congestion control or by the
2189 * NET_RX_SUCCESS (no congestion)
2190 * NET_RX_DROP (packet was dropped)
2194 int netif_rx(struct sk_buff *skb)
2196 struct softnet_data *queue;
2197 unsigned long flags;
2199 /* if netpoll wants it, pretend we never saw it */
2200 if (netpoll_rx(skb))
2203 if (!skb->tstamp.tv64)
2207 * The code is rearranged so that the path is the most
2208 * short when CPU is congested, but is still operating.
2210 local_irq_save(flags);
2211 queue = &__get_cpu_var(softnet_data);
2213 __get_cpu_var(netdev_rx_stat).total++;
2214 if (queue->input_pkt_queue.qlen <= netdev_max_backlog) {
2215 if (queue->input_pkt_queue.qlen) {
2217 __skb_queue_tail(&queue->input_pkt_queue, skb);
2218 local_irq_restore(flags);
2219 return NET_RX_SUCCESS;
2222 napi_schedule(&queue->backlog);
2226 __get_cpu_var(netdev_rx_stat).dropped++;
2227 local_irq_restore(flags);
2232 EXPORT_SYMBOL(netif_rx);
2234 int netif_rx_ni(struct sk_buff *skb)
2239 err = netif_rx(skb);
2240 if (local_softirq_pending())
2246 EXPORT_SYMBOL(netif_rx_ni);
2248 static void net_tx_action(struct softirq_action *h)
2250 struct softnet_data *sd = &__get_cpu_var(softnet_data);
2252 if (sd->completion_queue) {
2253 struct sk_buff *clist;
2255 local_irq_disable();
2256 clist = sd->completion_queue;
2257 sd->completion_queue = NULL;
2261 struct sk_buff *skb = clist;
2262 clist = clist->next;
2264 WARN_ON(atomic_read(&skb->users));
2269 if (sd->output_queue) {
2272 local_irq_disable();
2273 head = sd->output_queue;
2274 sd->output_queue = NULL;
2278 struct Qdisc *q = head;
2279 spinlock_t *root_lock;
2281 head = head->next_sched;
2283 root_lock = qdisc_lock(q);
2284 if (spin_trylock(root_lock)) {
2285 smp_mb__before_clear_bit();
2286 clear_bit(__QDISC_STATE_SCHED,
2289 spin_unlock(root_lock);
2291 if (!test_bit(__QDISC_STATE_DEACTIVATED,
2293 __netif_reschedule(q);
2295 smp_mb__before_clear_bit();
2296 clear_bit(__QDISC_STATE_SCHED,
2304 static inline int deliver_skb(struct sk_buff *skb,
2305 struct packet_type *pt_prev,
2306 struct net_device *orig_dev)
2308 atomic_inc(&skb->users);
2309 return pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
2312 #if defined(CONFIG_BRIDGE) || defined (CONFIG_BRIDGE_MODULE)
2314 #if defined(CONFIG_ATM_LANE) || defined(CONFIG_ATM_LANE_MODULE)
2315 /* This hook is defined here for ATM LANE */
2316 int (*br_fdb_test_addr_hook)(struct net_device *dev,
2317 unsigned char *addr) __read_mostly;
2318 EXPORT_SYMBOL_GPL(br_fdb_test_addr_hook);
2322 * If bridge module is loaded call bridging hook.
2323 * returns NULL if packet was consumed.
2325 struct sk_buff *(*br_handle_frame_hook)(struct net_bridge_port *p,
2326 struct sk_buff *skb) __read_mostly;
2327 EXPORT_SYMBOL_GPL(br_handle_frame_hook);
2329 static inline struct sk_buff *handle_bridge(struct sk_buff *skb,
2330 struct packet_type **pt_prev, int *ret,
2331 struct net_device *orig_dev)
2333 struct net_bridge_port *port;
2335 if (skb->pkt_type == PACKET_LOOPBACK ||
2336 (port = rcu_dereference(skb->dev->br_port)) == NULL)
2340 *ret = deliver_skb(skb, *pt_prev, orig_dev);
2344 return br_handle_frame_hook(port, skb);
2347 #define handle_bridge(skb, pt_prev, ret, orig_dev) (skb)
2350 #if defined(CONFIG_MACVLAN) || defined(CONFIG_MACVLAN_MODULE)
2351 struct sk_buff *(*macvlan_handle_frame_hook)(struct sk_buff *skb) __read_mostly;
2352 EXPORT_SYMBOL_GPL(macvlan_handle_frame_hook);
2354 static inline struct sk_buff *handle_macvlan(struct sk_buff *skb,
2355 struct packet_type **pt_prev,
2357 struct net_device *orig_dev)
2359 if (skb->dev->macvlan_port == NULL)
2363 *ret = deliver_skb(skb, *pt_prev, orig_dev);
2366 return macvlan_handle_frame_hook(skb);
2369 #define handle_macvlan(skb, pt_prev, ret, orig_dev) (skb)
2372 #ifdef CONFIG_NET_CLS_ACT
2373 /* TODO: Maybe we should just force sch_ingress to be compiled in
2374 * when CONFIG_NET_CLS_ACT is? otherwise some useless instructions
2375 * a compare and 2 stores extra right now if we dont have it on
2376 * but have CONFIG_NET_CLS_ACT
2377 * NOTE: This doesnt stop any functionality; if you dont have
2378 * the ingress scheduler, you just cant add policies on ingress.
2381 static int ing_filter(struct sk_buff *skb)
2383 struct net_device *dev = skb->dev;
2384 u32 ttl = G_TC_RTTL(skb->tc_verd);
2385 struct netdev_queue *rxq;
2386 int result = TC_ACT_OK;
2389 if (MAX_RED_LOOP < ttl++) {
2391 "Redir loop detected Dropping packet (%d->%d)\n",
2392 skb->skb_iif, dev->ifindex);
2396 skb->tc_verd = SET_TC_RTTL(skb->tc_verd, ttl);
2397 skb->tc_verd = SET_TC_AT(skb->tc_verd, AT_INGRESS);
2399 rxq = &dev->rx_queue;
2402 if (q != &noop_qdisc) {
2403 spin_lock(qdisc_lock(q));
2404 if (likely(!test_bit(__QDISC_STATE_DEACTIVATED, &q->state)))
2405 result = qdisc_enqueue_root(skb, q);
2406 spin_unlock(qdisc_lock(q));
2412 static inline struct sk_buff *handle_ing(struct sk_buff *skb,
2413 struct packet_type **pt_prev,
2414 int *ret, struct net_device *orig_dev)
2416 if (skb->dev->rx_queue.qdisc == &noop_qdisc)
2420 *ret = deliver_skb(skb, *pt_prev, orig_dev);
2423 /* Huh? Why does turning on AF_PACKET affect this? */
2424 skb->tc_verd = SET_TC_OK2MUNGE(skb->tc_verd);
2427 switch (ing_filter(skb)) {
2441 * netif_nit_deliver - deliver received packets to network taps
2444 * This function is used to deliver incoming packets to network
2445 * taps. It should be used when the normal netif_receive_skb path
2446 * is bypassed, for example because of VLAN acceleration.
2448 void netif_nit_deliver(struct sk_buff *skb)
2450 struct packet_type *ptype;
2452 if (list_empty(&ptype_all))
2455 skb_reset_network_header(skb);
2456 skb_reset_transport_header(skb);
2457 skb->mac_len = skb->network_header - skb->mac_header;
2460 list_for_each_entry_rcu(ptype, &ptype_all, list) {
2461 if (!ptype->dev || ptype->dev == skb->dev)
2462 deliver_skb(skb, ptype, skb->dev);
2468 * netif_receive_skb - process receive buffer from network
2469 * @skb: buffer to process
2471 * netif_receive_skb() is the main receive data processing function.
2472 * It always succeeds. The buffer may be dropped during processing
2473 * for congestion control or by the protocol layers.
2475 * This function may only be called from softirq context and interrupts
2476 * should be enabled.
2478 * Return values (usually ignored):
2479 * NET_RX_SUCCESS: no congestion
2480 * NET_RX_DROP: packet was dropped
2482 int netif_receive_skb(struct sk_buff *skb)
2484 struct packet_type *ptype, *pt_prev;
2485 struct net_device *orig_dev;
2486 struct net_device *master;
2487 struct net_device *null_or_orig;
2488 struct net_device *null_or_bond;
2489 int ret = NET_RX_DROP;
2492 if (!skb->tstamp.tv64)
2495 if (vlan_tx_tag_present(skb) && vlan_hwaccel_do_receive(skb))
2496 return NET_RX_SUCCESS;
2498 /* if we've gotten here through NAPI, check netpoll */
2499 if (netpoll_receive_skb(skb))
2503 skb->skb_iif = skb->dev->ifindex;
2505 null_or_orig = NULL;
2506 orig_dev = skb->dev;
2507 master = ACCESS_ONCE(orig_dev->master);
2509 if (skb_bond_should_drop(skb, master))
2510 null_or_orig = orig_dev; /* deliver only exact match */
2515 __get_cpu_var(netdev_rx_stat).total++;
2517 skb_reset_network_header(skb);
2518 skb_reset_transport_header(skb);
2519 skb->mac_len = skb->network_header - skb->mac_header;
2525 #ifdef CONFIG_NET_CLS_ACT
2526 if (skb->tc_verd & TC_NCLS) {
2527 skb->tc_verd = CLR_TC_NCLS(skb->tc_verd);
2532 list_for_each_entry_rcu(ptype, &ptype_all, list) {
2533 if (ptype->dev == null_or_orig || ptype->dev == skb->dev ||
2534 ptype->dev == orig_dev) {
2536 ret = deliver_skb(skb, pt_prev, orig_dev);
2541 #ifdef CONFIG_NET_CLS_ACT
2542 skb = handle_ing(skb, &pt_prev, &ret, orig_dev);
2548 skb = handle_bridge(skb, &pt_prev, &ret, orig_dev);
2551 skb = handle_macvlan(skb, &pt_prev, &ret, orig_dev);
2556 * Make sure frames received on VLAN interfaces stacked on
2557 * bonding interfaces still make their way to any base bonding
2558 * device that may have registered for a specific ptype. The
2559 * handler may have to adjust skb->dev and orig_dev.
2561 null_or_bond = NULL;
2562 if ((skb->dev->priv_flags & IFF_802_1Q_VLAN) &&
2563 (vlan_dev_real_dev(skb->dev)->priv_flags & IFF_BONDING)) {
2564 null_or_bond = vlan_dev_real_dev(skb->dev);
2567 type = skb->protocol;
2568 list_for_each_entry_rcu(ptype,
2569 &ptype_base[ntohs(type) & PTYPE_HASH_MASK], list) {
2570 if (ptype->type == type && (ptype->dev == null_or_orig ||
2571 ptype->dev == skb->dev || ptype->dev == orig_dev ||
2572 ptype->dev == null_or_bond)) {
2574 ret = deliver_skb(skb, pt_prev, orig_dev);
2580 ret = pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
2583 /* Jamal, now you will not able to escape explaining
2584 * me how you were going to use this. :-)
2593 EXPORT_SYMBOL(netif_receive_skb);
2595 /* Network device is going away, flush any packets still pending */
2596 static void flush_backlog(void *arg)
2598 struct net_device *dev = arg;
2599 struct softnet_data *queue = &__get_cpu_var(softnet_data);
2600 struct sk_buff *skb, *tmp;
2602 skb_queue_walk_safe(&queue->input_pkt_queue, skb, tmp)
2603 if (skb->dev == dev) {
2604 __skb_unlink(skb, &queue->input_pkt_queue);
2609 static int napi_gro_complete(struct sk_buff *skb)
2611 struct packet_type *ptype;
2612 __be16 type = skb->protocol;
2613 struct list_head *head = &ptype_base[ntohs(type) & PTYPE_HASH_MASK];
2616 if (NAPI_GRO_CB(skb)->count == 1) {
2617 skb_shinfo(skb)->gso_size = 0;
2622 list_for_each_entry_rcu(ptype, head, list) {
2623 if (ptype->type != type || ptype->dev || !ptype->gro_complete)
2626 err = ptype->gro_complete(skb);
2632 WARN_ON(&ptype->list == head);
2634 return NET_RX_SUCCESS;
2638 return netif_receive_skb(skb);
2641 static void napi_gro_flush(struct napi_struct *napi)
2643 struct sk_buff *skb, *next;
2645 for (skb = napi->gro_list; skb; skb = next) {
2648 napi_gro_complete(skb);
2651 napi->gro_count = 0;
2652 napi->gro_list = NULL;
2655 enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
2657 struct sk_buff **pp = NULL;
2658 struct packet_type *ptype;
2659 __be16 type = skb->protocol;
2660 struct list_head *head = &ptype_base[ntohs(type) & PTYPE_HASH_MASK];
2663 enum gro_result ret;
2665 if (!(skb->dev->features & NETIF_F_GRO))
2668 if (skb_is_gso(skb) || skb_has_frags(skb))
2672 list_for_each_entry_rcu(ptype, head, list) {
2673 if (ptype->type != type || ptype->dev || !ptype->gro_receive)
2676 skb_set_network_header(skb, skb_gro_offset(skb));
2677 mac_len = skb->network_header - skb->mac_header;
2678 skb->mac_len = mac_len;
2679 NAPI_GRO_CB(skb)->same_flow = 0;
2680 NAPI_GRO_CB(skb)->flush = 0;
2681 NAPI_GRO_CB(skb)->free = 0;
2683 pp = ptype->gro_receive(&napi->gro_list, skb);
2688 if (&ptype->list == head)
2691 same_flow = NAPI_GRO_CB(skb)->same_flow;
2692 ret = NAPI_GRO_CB(skb)->free ? GRO_MERGED_FREE : GRO_MERGED;
2695 struct sk_buff *nskb = *pp;
2699 napi_gro_complete(nskb);
2706 if (NAPI_GRO_CB(skb)->flush || napi->gro_count >= MAX_GRO_SKBS)
2710 NAPI_GRO_CB(skb)->count = 1;
2711 skb_shinfo(skb)->gso_size = skb_gro_len(skb);
2712 skb->next = napi->gro_list;
2713 napi->gro_list = skb;
2717 if (skb_headlen(skb) < skb_gro_offset(skb)) {
2718 int grow = skb_gro_offset(skb) - skb_headlen(skb);
2720 BUG_ON(skb->end - skb->tail < grow);
2722 memcpy(skb_tail_pointer(skb), NAPI_GRO_CB(skb)->frag0, grow);
2725 skb->data_len -= grow;
2727 skb_shinfo(skb)->frags[0].page_offset += grow;
2728 skb_shinfo(skb)->frags[0].size -= grow;
2730 if (unlikely(!skb_shinfo(skb)->frags[0].size)) {
2731 put_page(skb_shinfo(skb)->frags[0].page);
2732 memmove(skb_shinfo(skb)->frags,
2733 skb_shinfo(skb)->frags + 1,
2734 --skb_shinfo(skb)->nr_frags);
2745 EXPORT_SYMBOL(dev_gro_receive);
2748 __napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
2752 if (netpoll_rx_on(skb))
2755 for (p = napi->gro_list; p; p = p->next) {
2756 NAPI_GRO_CB(p)->same_flow =
2757 (p->dev == skb->dev) &&
2758 !compare_ether_header(skb_mac_header(p),
2759 skb_gro_mac_header(skb));
2760 NAPI_GRO_CB(p)->flush = 0;
2763 return dev_gro_receive(napi, skb);
2766 gro_result_t napi_skb_finish(gro_result_t ret, struct sk_buff *skb)
2770 if (netif_receive_skb(skb))
2775 case GRO_MERGED_FREE:
2786 EXPORT_SYMBOL(napi_skb_finish);
2788 void skb_gro_reset_offset(struct sk_buff *skb)
2790 NAPI_GRO_CB(skb)->data_offset = 0;
2791 NAPI_GRO_CB(skb)->frag0 = NULL;
2792 NAPI_GRO_CB(skb)->frag0_len = 0;
2794 if (skb->mac_header == skb->tail &&
2795 !PageHighMem(skb_shinfo(skb)->frags[0].page)) {
2796 NAPI_GRO_CB(skb)->frag0 =
2797 page_address(skb_shinfo(skb)->frags[0].page) +
2798 skb_shinfo(skb)->frags[0].page_offset;
2799 NAPI_GRO_CB(skb)->frag0_len = skb_shinfo(skb)->frags[0].size;
2802 EXPORT_SYMBOL(skb_gro_reset_offset);
2804 gro_result_t napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
2806 skb_gro_reset_offset(skb);
2808 return napi_skb_finish(__napi_gro_receive(napi, skb), skb);
2810 EXPORT_SYMBOL(napi_gro_receive);
2812 void napi_reuse_skb(struct napi_struct *napi, struct sk_buff *skb)
2814 __skb_pull(skb, skb_headlen(skb));
2815 skb_reserve(skb, NET_IP_ALIGN - skb_headroom(skb));
2819 EXPORT_SYMBOL(napi_reuse_skb);
2821 struct sk_buff *napi_get_frags(struct napi_struct *napi)
2823 struct sk_buff *skb = napi->skb;
2826 skb = netdev_alloc_skb_ip_align(napi->dev, GRO_MAX_HEAD);
2832 EXPORT_SYMBOL(napi_get_frags);
2834 gro_result_t napi_frags_finish(struct napi_struct *napi, struct sk_buff *skb,
2840 skb->protocol = eth_type_trans(skb, skb->dev);
2842 if (ret == GRO_HELD)
2843 skb_gro_pull(skb, -ETH_HLEN);
2844 else if (netif_receive_skb(skb))
2849 case GRO_MERGED_FREE:
2850 napi_reuse_skb(napi, skb);
2859 EXPORT_SYMBOL(napi_frags_finish);
2861 struct sk_buff *napi_frags_skb(struct napi_struct *napi)
2863 struct sk_buff *skb = napi->skb;
2870 skb_reset_mac_header(skb);
2871 skb_gro_reset_offset(skb);
2873 off = skb_gro_offset(skb);
2874 hlen = off + sizeof(*eth);
2875 eth = skb_gro_header_fast(skb, off);
2876 if (skb_gro_header_hard(skb, hlen)) {
2877 eth = skb_gro_header_slow(skb, hlen, off);
2878 if (unlikely(!eth)) {
2879 napi_reuse_skb(napi, skb);
2885 skb_gro_pull(skb, sizeof(*eth));
2888 * This works because the only protocols we care about don't require
2889 * special handling. We'll fix it up properly at the end.
2891 skb->protocol = eth->h_proto;
2896 EXPORT_SYMBOL(napi_frags_skb);
2898 gro_result_t napi_gro_frags(struct napi_struct *napi)
2900 struct sk_buff *skb = napi_frags_skb(napi);
2905 return napi_frags_finish(napi, skb, __napi_gro_receive(napi, skb));
2907 EXPORT_SYMBOL(napi_gro_frags);
2909 static int process_backlog(struct napi_struct *napi, int quota)
2912 struct softnet_data *queue = &__get_cpu_var(softnet_data);
2913 unsigned long start_time = jiffies;
2915 napi->weight = weight_p;
2917 struct sk_buff *skb;
2919 local_irq_disable();
2920 skb = __skb_dequeue(&queue->input_pkt_queue);
2922 __napi_complete(napi);
2928 netif_receive_skb(skb);
2929 } while (++work < quota && jiffies == start_time);
2935 * __napi_schedule - schedule for receive
2936 * @n: entry to schedule
2938 * The entry's receive function will be scheduled to run
2940 void __napi_schedule(struct napi_struct *n)
2942 unsigned long flags;
2944 local_irq_save(flags);
2945 list_add_tail(&n->poll_list, &__get_cpu_var(softnet_data).poll_list);
2946 __raise_softirq_irqoff(NET_RX_SOFTIRQ);
2947 local_irq_restore(flags);
2949 EXPORT_SYMBOL(__napi_schedule);
2951 void __napi_complete(struct napi_struct *n)
2953 BUG_ON(!test_bit(NAPI_STATE_SCHED, &n->state));
2954 BUG_ON(n->gro_list);
2956 list_del(&n->poll_list);
2957 smp_mb__before_clear_bit();
2958 clear_bit(NAPI_STATE_SCHED, &n->state);
2960 EXPORT_SYMBOL(__napi_complete);
2962 void napi_complete(struct napi_struct *n)
2964 unsigned long flags;
2967 * don't let napi dequeue from the cpu poll list
2968 * just in case its running on a different cpu
2970 if (unlikely(test_bit(NAPI_STATE_NPSVC, &n->state)))
2974 local_irq_save(flags);
2976 local_irq_restore(flags);
2978 EXPORT_SYMBOL(napi_complete);
2980 void netif_napi_add(struct net_device *dev, struct napi_struct *napi,
2981 int (*poll)(struct napi_struct *, int), int weight)
2983 INIT_LIST_HEAD(&napi->poll_list);
2984 napi->gro_count = 0;
2985 napi->gro_list = NULL;
2988 napi->weight = weight;
2989 list_add(&napi->dev_list, &dev->napi_list);
2991 #ifdef CONFIG_NETPOLL
2992 spin_lock_init(&napi->poll_lock);
2993 napi->poll_owner = -1;
2995 set_bit(NAPI_STATE_SCHED, &napi->state);
2997 EXPORT_SYMBOL(netif_napi_add);
2999 void netif_napi_del(struct napi_struct *napi)
3001 struct sk_buff *skb, *next;
3003 list_del_init(&napi->dev_list);
3004 napi_free_frags(napi);
3006 for (skb = napi->gro_list; skb; skb = next) {
3012 napi->gro_list = NULL;
3013 napi->gro_count = 0;
3015 EXPORT_SYMBOL(netif_napi_del);
3018 static void net_rx_action(struct softirq_action *h)
3020 struct list_head *list = &__get_cpu_var(softnet_data).poll_list;
3021 unsigned long time_limit = jiffies + 2;
3022 int budget = netdev_budget;
3025 local_irq_disable();
3027 while (!list_empty(list)) {
3028 struct napi_struct *n;
3031 /* If softirq window is exhuasted then punt.
3032 * Allow this to run for 2 jiffies since which will allow
3033 * an average latency of 1.5/HZ.
3035 if (unlikely(budget <= 0 || time_after(jiffies, time_limit)))
3040 /* Even though interrupts have been re-enabled, this
3041 * access is safe because interrupts can only add new
3042 * entries to the tail of this list, and only ->poll()
3043 * calls can remove this head entry from the list.
3045 n = list_first_entry(list, struct napi_struct, poll_list);
3047 have = netpoll_poll_lock(n);
3051 /* This NAPI_STATE_SCHED test is for avoiding a race
3052 * with netpoll's poll_napi(). Only the entity which
3053 * obtains the lock and sees NAPI_STATE_SCHED set will
3054 * actually make the ->poll() call. Therefore we avoid
3055 * accidently calling ->poll() when NAPI is not scheduled.
3058 if (test_bit(NAPI_STATE_SCHED, &n->state)) {
3059 work = n->poll(n, weight);
3063 WARN_ON_ONCE(work > weight);
3067 local_irq_disable();
3069 /* Drivers must not modify the NAPI state if they
3070 * consume the entire weight. In such cases this code
3071 * still "owns" the NAPI instance and therefore can
3072 * move the instance around on the list at-will.
3074 if (unlikely(work == weight)) {
3075 if (unlikely(napi_disable_pending(n))) {
3078 local_irq_disable();
3080 list_move_tail(&n->poll_list, list);
3083 netpoll_poll_unlock(have);
3088 #ifdef CONFIG_NET_DMA
3090 * There may not be any more sk_buffs coming right now, so push
3091 * any pending DMA copies to hardware
3093 dma_issue_pending_all();
3099 __get_cpu_var(netdev_rx_stat).time_squeeze++;
3100 __raise_softirq_irqoff(NET_RX_SOFTIRQ);
3104 static gifconf_func_t *gifconf_list[NPROTO];
3107 * register_gifconf - register a SIOCGIF handler
3108 * @family: Address family
3109 * @gifconf: Function handler
3111 * Register protocol dependent address dumping routines. The handler
3112 * that is passed must not be freed or reused until it has been replaced
3113 * by another handler.
3115 int register_gifconf(unsigned int family, gifconf_func_t *gifconf)
3117 if (family >= NPROTO)
3119 gifconf_list[family] = gifconf;
3122 EXPORT_SYMBOL(register_gifconf);
3126 * Map an interface index to its name (SIOCGIFNAME)
3130 * We need this ioctl for efficient implementation of the
3131 * if_indextoname() function required by the IPv6 API. Without
3132 * it, we would have to search all the interfaces to find a
3136 static int dev_ifname(struct net *net, struct ifreq __user *arg)
3138 struct net_device *dev;
3142 * Fetch the caller's info block.
3145 if (copy_from_user(&ifr, arg, sizeof(struct ifreq)))
3149 dev = dev_get_by_index_rcu(net, ifr.ifr_ifindex);
3155 strcpy(ifr.ifr_name, dev->name);
3158 if (copy_to_user(arg, &ifr, sizeof(struct ifreq)))
3164 * Perform a SIOCGIFCONF call. This structure will change
3165 * size eventually, and there is nothing I can do about it.
3166 * Thus we will need a 'compatibility mode'.
3169 static int dev_ifconf(struct net *net, char __user *arg)
3172 struct net_device *dev;
3179 * Fetch the caller's info block.
3182 if (copy_from_user(&ifc, arg, sizeof(struct ifconf)))
3189 * Loop over the interfaces, and write an info block for each.
3193 for_each_netdev(net, dev) {
3194 for (i = 0; i < NPROTO; i++) {
3195 if (gifconf_list[i]) {
3198 done = gifconf_list[i](dev, NULL, 0);
3200 done = gifconf_list[i](dev, pos + total,
3210 * All done. Write the updated control block back to the caller.
3212 ifc.ifc_len = total;
3215 * Both BSD and Solaris return 0 here, so we do too.
3217 return copy_to_user(arg, &ifc, sizeof(struct ifconf)) ? -EFAULT : 0;
3220 #ifdef CONFIG_PROC_FS
3222 * This is invoked by the /proc filesystem handler to display a device
3225 void *dev_seq_start(struct seq_file *seq, loff_t *pos)
3228 struct net *net = seq_file_net(seq);
3230 struct net_device *dev;
3234 return SEQ_START_TOKEN;
3237 for_each_netdev_rcu(net, dev)
3244 void *dev_seq_next(struct seq_file *seq, void *v, loff_t *pos)
3246 struct net_device *dev = (v == SEQ_START_TOKEN) ?
3247 first_net_device(seq_file_net(seq)) :
3248 next_net_device((struct net_device *)v);
3251 return rcu_dereference(dev);
3254 void dev_seq_stop(struct seq_file *seq, void *v)
3260 static void dev_seq_printf_stats(struct seq_file *seq, struct net_device *dev)
3262 const struct net_device_stats *stats = dev_get_stats(dev);
3264 seq_printf(seq, "%6s: %7lu %7lu %4lu %4lu %4lu %5lu %10lu %9lu "
3265 "%8lu %7lu %4lu %4lu %4lu %5lu %7lu %10lu\n",
3266 dev->name, stats->rx_bytes, stats->rx_packets,
3268 stats->rx_dropped + stats->rx_missed_errors,
3269 stats->rx_fifo_errors,
3270 stats->rx_length_errors + stats->rx_over_errors +
3271 stats->rx_crc_errors + stats->rx_frame_errors,
3272 stats->rx_compressed, stats->multicast,
3273 stats->tx_bytes, stats->tx_packets,
3274 stats->tx_errors, stats->tx_dropped,
3275 stats->tx_fifo_errors, stats->collisions,
3276 stats->tx_carrier_errors +
3277 stats->tx_aborted_errors +
3278 stats->tx_window_errors +
3279 stats->tx_heartbeat_errors,
3280 stats->tx_compressed);
3284 * Called from the PROCfs module. This now uses the new arbitrary sized
3285 * /proc/net interface to create /proc/net/dev
3287 static int dev_seq_show(struct seq_file *seq, void *v)
3289 if (v == SEQ_START_TOKEN)
3290 seq_puts(seq, "Inter-| Receive "
3292 " face |bytes packets errs drop fifo frame "
3293 "compressed multicast|bytes packets errs "
3294 "drop fifo colls carrier compressed\n");
3296 dev_seq_printf_stats(seq, v);
3300 static struct netif_rx_stats *softnet_get_online(loff_t *pos)
3302 struct netif_rx_stats *rc = NULL;
3304 while (*pos < nr_cpu_ids)
3305 if (cpu_online(*pos)) {
3306 rc = &per_cpu(netdev_rx_stat, *pos);
3313 static void *softnet_seq_start(struct seq_file *seq, loff_t *pos)
3315 return softnet_get_online(pos);
3318 static void *softnet_seq_next(struct seq_file *seq, void *v, loff_t *pos)
3321 return softnet_get_online(pos);
3324 static void softnet_seq_stop(struct seq_file *seq, void *v)
3328 static int softnet_seq_show(struct seq_file *seq, void *v)
3330 struct netif_rx_stats *s = v;
3332 seq_printf(seq, "%08x %08x %08x %08x %08x %08x %08x %08x %08x\n",
3333 s->total, s->dropped, s->time_squeeze, 0,
3334 0, 0, 0, 0, /* was fastroute */
3339 static const struct seq_operations dev_seq_ops = {
3340 .start = dev_seq_start,
3341 .next = dev_seq_next,
3342 .stop = dev_seq_stop,
3343 .show = dev_seq_show,
3346 static int dev_seq_open(struct inode *inode, struct file *file)
3348 return seq_open_net(inode, file, &dev_seq_ops,
3349 sizeof(struct seq_net_private));
3352 static const struct file_operations dev_seq_fops = {
3353 .owner = THIS_MODULE,
3354 .open = dev_seq_open,
3356 .llseek = seq_lseek,
3357 .release = seq_release_net,
3360 static const struct seq_operations softnet_seq_ops = {
3361 .start = softnet_seq_start,
3362 .next = softnet_seq_next,
3363 .stop = softnet_seq_stop,
3364 .show = softnet_seq_show,
3367 static int softnet_seq_open(struct inode *inode, struct file *file)
3369 return seq_open(file, &softnet_seq_ops);
3372 static const struct file_operations softnet_seq_fops = {
3373 .owner = THIS_MODULE,
3374 .open = softnet_seq_open,
3376 .llseek = seq_lseek,
3377 .release = seq_release,
3380 static void *ptype_get_idx(loff_t pos)
3382 struct packet_type *pt = NULL;
3386 list_for_each_entry_rcu(pt, &ptype_all, list) {
3392 for (t = 0; t < PTYPE_HASH_SIZE; t++) {
3393 list_for_each_entry_rcu(pt, &ptype_base[t], list) {
3402 static void *ptype_seq_start(struct seq_file *seq, loff_t *pos)
3406 return *pos ? ptype_get_idx(*pos - 1) : SEQ_START_TOKEN;
3409 static void *ptype_seq_next(struct seq_file *seq, void *v, loff_t *pos)
3411 struct packet_type *pt;
3412 struct list_head *nxt;
3416 if (v == SEQ_START_TOKEN)
3417 return ptype_get_idx(0);
3420 nxt = pt->list.next;
3421 if (pt->type == htons(ETH_P_ALL)) {
3422 if (nxt != &ptype_all)
3425 nxt = ptype_base[0].next;
3427 hash = ntohs(pt->type) & PTYPE_HASH_MASK;
3429 while (nxt == &ptype_base[hash]) {
3430 if (++hash >= PTYPE_HASH_SIZE)
3432 nxt = ptype_base[hash].next;
3435 return list_entry(nxt, struct packet_type, list);
3438 static void ptype_seq_stop(struct seq_file *seq, void *v)
3444 static int ptype_seq_show(struct seq_file *seq, void *v)
3446 struct packet_type *pt = v;
3448 if (v == SEQ_START_TOKEN)
3449 seq_puts(seq, "Type Device Function\n");
3450 else if (pt->dev == NULL || dev_net(pt->dev) == seq_file_net(seq)) {
3451 if (pt->type == htons(ETH_P_ALL))
3452 seq_puts(seq, "ALL ");
3454 seq_printf(seq, "%04x", ntohs(pt->type));
3456 seq_printf(seq, " %-8s %pF\n",
3457 pt->dev ? pt->dev->name : "", pt->func);
3463 static const struct seq_operations ptype_seq_ops = {
3464 .start = ptype_seq_start,
3465 .next = ptype_seq_next,
3466 .stop = ptype_seq_stop,
3467 .show = ptype_seq_show,
3470 static int ptype_seq_open(struct inode *inode, struct file *file)
3472 return seq_open_net(inode, file, &ptype_seq_ops,
3473 sizeof(struct seq_net_private));
3476 static const struct file_operations ptype_seq_fops = {
3477 .owner = THIS_MODULE,
3478 .open = ptype_seq_open,
3480 .llseek = seq_lseek,
3481 .release = seq_release_net,
3485 static int __net_init dev_proc_net_init(struct net *net)
3489 if (!proc_net_fops_create(net, "dev", S_IRUGO, &dev_seq_fops))
3491 if (!proc_net_fops_create(net, "softnet_stat", S_IRUGO, &softnet_seq_fops))
3493 if (!proc_net_fops_create(net, "ptype", S_IRUGO, &ptype_seq_fops))
3496 if (wext_proc_init(net))
3502 proc_net_remove(net, "ptype");
3504 proc_net_remove(net, "softnet_stat");
3506 proc_net_remove(net, "dev");
3510 static void __net_exit dev_proc_net_exit(struct net *net)
3512 wext_proc_exit(net);
3514 proc_net_remove(net, "ptype");
3515 proc_net_remove(net, "softnet_stat");
3516 proc_net_remove(net, "dev");
3519 static struct pernet_operations __net_initdata dev_proc_ops = {
3520 .init = dev_proc_net_init,
3521 .exit = dev_proc_net_exit,
3524 static int __init dev_proc_init(void)
3526 return register_pernet_subsys(&dev_proc_ops);
3529 #define dev_proc_init() 0
3530 #endif /* CONFIG_PROC_FS */
3534 * netdev_set_master - set up master/slave pair
3535 * @slave: slave device
3536 * @master: new master device
3538 * Changes the master device of the slave. Pass %NULL to break the
3539 * bonding. The caller must hold the RTNL semaphore. On a failure
3540 * a negative errno code is returned. On success the reference counts
3541 * are adjusted, %RTM_NEWLINK is sent to the routing socket and the
3542 * function returns zero.
3544 int netdev_set_master(struct net_device *slave, struct net_device *master)
3546 struct net_device *old = slave->master;
3556 slave->master = master;
3564 slave->flags |= IFF_SLAVE;
3566 slave->flags &= ~IFF_SLAVE;
3568 rtmsg_ifinfo(RTM_NEWLINK, slave, IFF_SLAVE);
3571 EXPORT_SYMBOL(netdev_set_master);
3573 static void dev_change_rx_flags(struct net_device *dev, int flags)
3575 const struct net_device_ops *ops = dev->netdev_ops;
3577 if ((dev->flags & IFF_UP) && ops->ndo_change_rx_flags)
3578 ops->ndo_change_rx_flags(dev, flags);
3581 static int __dev_set_promiscuity(struct net_device *dev, int inc)
3583 unsigned short old_flags = dev->flags;
3589 dev->flags |= IFF_PROMISC;
3590 dev->promiscuity += inc;
3591 if (dev->promiscuity == 0) {
3594 * If inc causes overflow, untouch promisc and return error.
3597 dev->flags &= ~IFF_PROMISC;
3599 dev->promiscuity -= inc;
3600 printk(KERN_WARNING "%s: promiscuity touches roof, "
3601 "set promiscuity failed, promiscuity feature "
3602 "of device might be broken.\n", dev->name);
3606 if (dev->flags != old_flags) {
3607 printk(KERN_INFO "device %s %s promiscuous mode\n",
3608 dev->name, (dev->flags & IFF_PROMISC) ? "entered" :
3610 if (audit_enabled) {
3611 current_uid_gid(&uid, &gid);
3612 audit_log(current->audit_context, GFP_ATOMIC,
3613 AUDIT_ANOM_PROMISCUOUS,
3614 "dev=%s prom=%d old_prom=%d auid=%u uid=%u gid=%u ses=%u",
3615 dev->name, (dev->flags & IFF_PROMISC),
3616 (old_flags & IFF_PROMISC),
3617 audit_get_loginuid(current),
3619 audit_get_sessionid(current));
3622 dev_change_rx_flags(dev, IFF_PROMISC);
3628 * dev_set_promiscuity - update promiscuity count on a device
3632 * Add or remove promiscuity from a device. While the count in the device
3633 * remains above zero the interface remains promiscuous. Once it hits zero
3634 * the device reverts back to normal filtering operation. A negative inc
3635 * value is used to drop promiscuity on the device.
3636 * Return 0 if successful or a negative errno code on error.
3638 int dev_set_promiscuity(struct net_device *dev, int inc)
3640 unsigned short old_flags = dev->flags;
3643 err = __dev_set_promiscuity(dev, inc);
3646 if (dev->flags != old_flags)
3647 dev_set_rx_mode(dev);
3650 EXPORT_SYMBOL(dev_set_promiscuity);
3653 * dev_set_allmulti - update allmulti count on a device
3657 * Add or remove reception of all multicast frames to a device. While the
3658 * count in the device remains above zero the interface remains listening
3659 * to all interfaces. Once it hits zero the device reverts back to normal
3660 * filtering operation. A negative @inc value is used to drop the counter
3661 * when releasing a resource needing all multicasts.
3662 * Return 0 if successful or a negative errno code on error.
3665 int dev_set_allmulti(struct net_device *dev, int inc)
3667 unsigned short old_flags = dev->flags;
3671 dev->flags |= IFF_ALLMULTI;
3672 dev->allmulti += inc;
3673 if (dev->allmulti == 0) {
3676 * If inc causes overflow, untouch allmulti and return error.
3679 dev->flags &= ~IFF_ALLMULTI;
3681 dev->allmulti -= inc;
3682 printk(KERN_WARNING "%s: allmulti touches roof, "
3683 "set allmulti failed, allmulti feature of "
3684 "device might be broken.\n", dev->name);
3688 if (dev->flags ^ old_flags) {
3689 dev_change_rx_flags(dev, IFF_ALLMULTI);
3690 dev_set_rx_mode(dev);
3694 EXPORT_SYMBOL(dev_set_allmulti);
3697 * Upload unicast and multicast address lists to device and
3698 * configure RX filtering. When the device doesn't support unicast
3699 * filtering it is put in promiscuous mode while unicast addresses
3702 void __dev_set_rx_mode(struct net_device *dev)
3704 const struct net_device_ops *ops = dev->netdev_ops;
3706 /* dev_open will call this function so the list will stay sane. */
3707 if (!(dev->flags&IFF_UP))
3710 if (!netif_device_present(dev))
3713 if (ops->ndo_set_rx_mode)
3714 ops->ndo_set_rx_mode(dev);
3716 /* Unicast addresses changes may only happen under the rtnl,
3717 * therefore calling __dev_set_promiscuity here is safe.
3719 if (!netdev_uc_empty(dev) && !dev->uc_promisc) {
3720 __dev_set_promiscuity(dev, 1);
3721 dev->uc_promisc = 1;
3722 } else if (netdev_uc_empty(dev) && dev->uc_promisc) {
3723 __dev_set_promiscuity(dev, -1);
3724 dev->uc_promisc = 0;
3727 if (ops->ndo_set_multicast_list)
3728 ops->ndo_set_multicast_list(dev);
3732 void dev_set_rx_mode(struct net_device *dev)
3734 netif_addr_lock_bh(dev);
3735 __dev_set_rx_mode(dev);
3736 netif_addr_unlock_bh(dev);
3739 /* hw addresses list handling functions */
3741 static int __hw_addr_add(struct netdev_hw_addr_list *list, unsigned char *addr,
3742 int addr_len, unsigned char addr_type)
3744 struct netdev_hw_addr *ha;
3747 if (addr_len > MAX_ADDR_LEN)
3750 list_for_each_entry(ha, &list->list, list) {
3751 if (!memcmp(ha->addr, addr, addr_len) &&
3752 ha->type == addr_type) {
3759 alloc_size = sizeof(*ha);
3760 if (alloc_size < L1_CACHE_BYTES)
3761 alloc_size = L1_CACHE_BYTES;
3762 ha = kmalloc(alloc_size, GFP_ATOMIC);
3765 memcpy(ha->addr, addr, addr_len);
3766 ha->type = addr_type;
3769 list_add_tail_rcu(&ha->list, &list->list);
3774 static void ha_rcu_free(struct rcu_head *head)
3776 struct netdev_hw_addr *ha;
3778 ha = container_of(head, struct netdev_hw_addr, rcu_head);
3782 static int __hw_addr_del(struct netdev_hw_addr_list *list, unsigned char *addr,
3783 int addr_len, unsigned char addr_type)
3785 struct netdev_hw_addr *ha;
3787 list_for_each_entry(ha, &list->list, list) {
3788 if (!memcmp(ha->addr, addr, addr_len) &&
3789 (ha->type == addr_type || !addr_type)) {
3792 list_del_rcu(&ha->list);
3793 call_rcu(&ha->rcu_head, ha_rcu_free);
3801 static int __hw_addr_add_multiple(struct netdev_hw_addr_list *to_list,
3802 struct netdev_hw_addr_list *from_list,
3804 unsigned char addr_type)
3807 struct netdev_hw_addr *ha, *ha2;
3810 list_for_each_entry(ha, &from_list->list, list) {
3811 type = addr_type ? addr_type : ha->type;
3812 err = __hw_addr_add(to_list, ha->addr, addr_len, type);
3819 list_for_each_entry(ha2, &from_list->list, list) {
3822 type = addr_type ? addr_type : ha2->type;
3823 __hw_addr_del(to_list, ha2->addr, addr_len, type);
3828 static void __hw_addr_del_multiple(struct netdev_hw_addr_list *to_list,
3829 struct netdev_hw_addr_list *from_list,
3831 unsigned char addr_type)
3833 struct netdev_hw_addr *ha;
3836 list_for_each_entry(ha, &from_list->list, list) {
3837 type = addr_type ? addr_type : ha->type;
3838 __hw_addr_del(to_list, ha->addr, addr_len, addr_type);
3842 static int __hw_addr_sync(struct netdev_hw_addr_list *to_list,
3843 struct netdev_hw_addr_list *from_list,
3847 struct netdev_hw_addr *ha, *tmp;
3849 list_for_each_entry_safe(ha, tmp, &from_list->list, list) {
3851 err = __hw_addr_add(to_list, ha->addr,
3852 addr_len, ha->type);
3857 } else if (ha->refcount == 1) {
3858 __hw_addr_del(to_list, ha->addr, addr_len, ha->type);
3859 __hw_addr_del(from_list, ha->addr, addr_len, ha->type);
3865 static void __hw_addr_unsync(struct netdev_hw_addr_list *to_list,
3866 struct netdev_hw_addr_list *from_list,
3869 struct netdev_hw_addr *ha, *tmp;
3871 list_for_each_entry_safe(ha, tmp, &from_list->list, list) {
3873 __hw_addr_del(to_list, ha->addr,
3874 addr_len, ha->type);
3876 __hw_addr_del(from_list, ha->addr,
3877 addr_len, ha->type);
3882 static void __hw_addr_flush(struct netdev_hw_addr_list *list)
3884 struct netdev_hw_addr *ha, *tmp;
3886 list_for_each_entry_safe(ha, tmp, &list->list, list) {
3887 list_del_rcu(&ha->list);
3888 call_rcu(&ha->rcu_head, ha_rcu_free);
3893 static void __hw_addr_init(struct netdev_hw_addr_list *list)
3895 INIT_LIST_HEAD(&list->list);
3899 /* Device addresses handling functions */
3901 static void dev_addr_flush(struct net_device *dev)
3903 /* rtnl_mutex must be held here */
3905 __hw_addr_flush(&dev->dev_addrs);
3906 dev->dev_addr = NULL;
3909 static int dev_addr_init(struct net_device *dev)
3911 unsigned char addr[MAX_ADDR_LEN];
3912 struct netdev_hw_addr *ha;
3915 /* rtnl_mutex must be held here */
3917 __hw_addr_init(&dev->dev_addrs);
3918 memset(addr, 0, sizeof(addr));
3919 err = __hw_addr_add(&dev->dev_addrs, addr, sizeof(addr),
3920 NETDEV_HW_ADDR_T_LAN);
3923 * Get the first (previously created) address from the list
3924 * and set dev_addr pointer to this location.
3926 ha = list_first_entry(&dev->dev_addrs.list,
3927 struct netdev_hw_addr, list);
3928 dev->dev_addr = ha->addr;
3934 * dev_addr_add - Add a device address
3936 * @addr: address to add
3937 * @addr_type: address type
3939 * Add a device address to the device or increase the reference count if
3940 * it already exists.
3942 * The caller must hold the rtnl_mutex.
3944 int dev_addr_add(struct net_device *dev, unsigned char *addr,
3945 unsigned char addr_type)
3951 err = __hw_addr_add(&dev->dev_addrs, addr, dev->addr_len, addr_type);
3953 call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
3956 EXPORT_SYMBOL(dev_addr_add);
3959 * dev_addr_del - Release a device address.
3961 * @addr: address to delete
3962 * @addr_type: address type
3964 * Release reference to a device address and remove it from the device
3965 * if the reference count drops to zero.
3967 * The caller must hold the rtnl_mutex.
3969 int dev_addr_del(struct net_device *dev, unsigned char *addr,
3970 unsigned char addr_type)
3973 struct netdev_hw_addr *ha;
3978 * We can not remove the first address from the list because
3979 * dev->dev_addr points to that.
3981 ha = list_first_entry(&dev->dev_addrs.list,
3982 struct netdev_hw_addr, list);
3983 if (ha->addr == dev->dev_addr && ha->refcount == 1)
3986 err = __hw_addr_del(&dev->dev_addrs, addr, dev->addr_len,
3989 call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
3992 EXPORT_SYMBOL(dev_addr_del);
3995 * dev_addr_add_multiple - Add device addresses from another device
3996 * @to_dev: device to which addresses will be added
3997 * @from_dev: device from which addresses will be added
3998 * @addr_type: address type - 0 means type will be used from from_dev
4000 * Add device addresses of the one device to another.
4002 * The caller must hold the rtnl_mutex.
4004 int dev_addr_add_multiple(struct net_device *to_dev,
4005 struct net_device *from_dev,
4006 unsigned char addr_type)
4012 if (from_dev->addr_len != to_dev->addr_len)
4014 err = __hw_addr_add_multiple(&to_dev->dev_addrs, &from_dev->dev_addrs,
4015 to_dev->addr_len, addr_type);
4017 call_netdevice_notifiers(NETDEV_CHANGEADDR, to_dev);
4020 EXPORT_SYMBOL(dev_addr_add_multiple);
4023 * dev_addr_del_multiple - Delete device addresses by another device
4024 * @to_dev: device where the addresses will be deleted
4025 * @from_dev: device by which addresses the addresses will be deleted
4026 * @addr_type: address type - 0 means type will used from from_dev
4028 * Deletes addresses in to device by the list of addresses in from device.
4030 * The caller must hold the rtnl_mutex.
4032 int dev_addr_del_multiple(struct net_device *to_dev,
4033 struct net_device *from_dev,
4034 unsigned char addr_type)
4038 if (from_dev->addr_len != to_dev->addr_len)
4040 __hw_addr_del_multiple(&to_dev->dev_addrs, &from_dev->dev_addrs,
4041 to_dev->addr_len, addr_type);
4042 call_netdevice_notifiers(NETDEV_CHANGEADDR, to_dev);
4045 EXPORT_SYMBOL(dev_addr_del_multiple);
4047 /* multicast addresses handling functions */
4049 int __dev_addr_delete(struct dev_addr_list **list, int *count,
4050 void *addr, int alen, int glbl)
4052 struct dev_addr_list *da;
4054 for (; (da = *list) != NULL; list = &da->next) {
4055 if (memcmp(da->da_addr, addr, da->da_addrlen) == 0 &&
4056 alen == da->da_addrlen) {
4058 int old_glbl = da->da_gusers;
4075 int __dev_addr_add(struct dev_addr_list **list, int *count,
4076 void *addr, int alen, int glbl)
4078 struct dev_addr_list *da;
4080 for (da = *list; da != NULL; da = da->next) {
4081 if (memcmp(da->da_addr, addr, da->da_addrlen) == 0 &&
4082 da->da_addrlen == alen) {
4084 int old_glbl = da->da_gusers;
4094 da = kzalloc(sizeof(*da), GFP_ATOMIC);
4097 memcpy(da->da_addr, addr, alen);
4098 da->da_addrlen = alen;
4100 da->da_gusers = glbl ? 1 : 0;
4108 * dev_unicast_delete - Release secondary unicast address.
4110 * @addr: address to delete
4112 * Release reference to a secondary unicast address and remove it
4113 * from the device if the reference count drops to zero.
4115 * The caller must hold the rtnl_mutex.
4117 int dev_unicast_delete(struct net_device *dev, void *addr)
4123 netif_addr_lock_bh(dev);
4124 err = __hw_addr_del(&dev->uc, addr, dev->addr_len,
4125 NETDEV_HW_ADDR_T_UNICAST);
4127 __dev_set_rx_mode(dev);
4128 netif_addr_unlock_bh(dev);
4131 EXPORT_SYMBOL(dev_unicast_delete);
4134 * dev_unicast_add - add a secondary unicast address
4136 * @addr: address to add
4138 * Add a secondary unicast address to the device or increase
4139 * the reference count if it already exists.
4141 * The caller must hold the rtnl_mutex.
4143 int dev_unicast_add(struct net_device *dev, void *addr)
4149 netif_addr_lock_bh(dev);
4150 err = __hw_addr_add(&dev->uc, addr, dev->addr_len,
4151 NETDEV_HW_ADDR_T_UNICAST);
4153 __dev_set_rx_mode(dev);
4154 netif_addr_unlock_bh(dev);
4157 EXPORT_SYMBOL(dev_unicast_add);
4159 int __dev_addr_sync(struct dev_addr_list **to, int *to_count,
4160 struct dev_addr_list **from, int *from_count)
4162 struct dev_addr_list *da, *next;
4166 while (da != NULL) {
4168 if (!da->da_synced) {
4169 err = __dev_addr_add(to, to_count,
4170 da->da_addr, da->da_addrlen, 0);
4175 } else if (da->da_users == 1) {
4176 __dev_addr_delete(to, to_count,
4177 da->da_addr, da->da_addrlen, 0);
4178 __dev_addr_delete(from, from_count,
4179 da->da_addr, da->da_addrlen, 0);
4185 EXPORT_SYMBOL_GPL(__dev_addr_sync);
4187 void __dev_addr_unsync(struct dev_addr_list **to, int *to_count,
4188 struct dev_addr_list **from, int *from_count)
4190 struct dev_addr_list *da, *next;
4193 while (da != NULL) {
4195 if (da->da_synced) {
4196 __dev_addr_delete(to, to_count,
4197 da->da_addr, da->da_addrlen, 0);
4199 __dev_addr_delete(from, from_count,
4200 da->da_addr, da->da_addrlen, 0);
4205 EXPORT_SYMBOL_GPL(__dev_addr_unsync);
4208 * dev_unicast_sync - Synchronize device's unicast list to another device
4209 * @to: destination device
4210 * @from: source device
4212 * Add newly added addresses to the destination device and release
4213 * addresses that have no users left. The source device must be
4214 * locked by netif_tx_lock_bh.
4216 * This function is intended to be called from the dev->set_rx_mode
4217 * function of layered software devices.
4219 int dev_unicast_sync(struct net_device *to, struct net_device *from)
4223 if (to->addr_len != from->addr_len)
4226 netif_addr_lock_bh(to);
4227 err = __hw_addr_sync(&to->uc, &from->uc, to->addr_len);
4229 __dev_set_rx_mode(to);
4230 netif_addr_unlock_bh(to);
4233 EXPORT_SYMBOL(dev_unicast_sync);
4236 * dev_unicast_unsync - Remove synchronized addresses from the destination device
4237 * @to: destination device
4238 * @from: source device
4240 * Remove all addresses that were added to the destination device by
4241 * dev_unicast_sync(). This function is intended to be called from the
4242 * dev->stop function of layered software devices.
4244 void dev_unicast_unsync(struct net_device *to, struct net_device *from)
4246 if (to->addr_len != from->addr_len)
4249 netif_addr_lock_bh(from);
4250 netif_addr_lock(to);
4251 __hw_addr_unsync(&to->uc, &from->uc, to->addr_len);
4252 __dev_set_rx_mode(to);
4253 netif_addr_unlock(to);
4254 netif_addr_unlock_bh(from);
4256 EXPORT_SYMBOL(dev_unicast_unsync);
4258 static void dev_unicast_flush(struct net_device *dev)
4260 netif_addr_lock_bh(dev);
4261 __hw_addr_flush(&dev->uc);
4262 netif_addr_unlock_bh(dev);
4265 static void dev_unicast_init(struct net_device *dev)
4267 __hw_addr_init(&dev->uc);
4271 static void __dev_addr_discard(struct dev_addr_list **list)
4273 struct dev_addr_list *tmp;
4275 while (*list != NULL) {
4278 if (tmp->da_users > tmp->da_gusers)
4279 printk("__dev_addr_discard: address leakage! "
4280 "da_users=%d\n", tmp->da_users);
4285 static void dev_addr_discard(struct net_device *dev)
4287 netif_addr_lock_bh(dev);
4289 __dev_addr_discard(&dev->mc_list);
4290 netdev_mc_count(dev) = 0;
4292 netif_addr_unlock_bh(dev);
4296 * dev_get_flags - get flags reported to userspace
4299 * Get the combination of flag bits exported through APIs to userspace.
4301 unsigned dev_get_flags(const struct net_device *dev)
4305 flags = (dev->flags & ~(IFF_PROMISC |
4310 (dev->gflags & (IFF_PROMISC |
4313 if (netif_running(dev)) {
4314 if (netif_oper_up(dev))
4315 flags |= IFF_RUNNING;
4316 if (netif_carrier_ok(dev))
4317 flags |= IFF_LOWER_UP;
4318 if (netif_dormant(dev))
4319 flags |= IFF_DORMANT;
4324 EXPORT_SYMBOL(dev_get_flags);
4326 int __dev_change_flags(struct net_device *dev, unsigned int flags)
4328 int old_flags = dev->flags;
4334 * Set the flags on our device.
4337 dev->flags = (flags & (IFF_DEBUG | IFF_NOTRAILERS | IFF_NOARP |
4338 IFF_DYNAMIC | IFF_MULTICAST | IFF_PORTSEL |
4340 (dev->flags & (IFF_UP | IFF_VOLATILE | IFF_PROMISC |
4344 * Load in the correct multicast list now the flags have changed.
4347 if ((old_flags ^ flags) & IFF_MULTICAST)
4348 dev_change_rx_flags(dev, IFF_MULTICAST);
4350 dev_set_rx_mode(dev);
4353 * Have we downed the interface. We handle IFF_UP ourselves
4354 * according to user attempts to set it, rather than blindly
4359 if ((old_flags ^ flags) & IFF_UP) { /* Bit is different ? */
4360 ret = ((old_flags & IFF_UP) ? __dev_close : __dev_open)(dev);
4363 dev_set_rx_mode(dev);
4366 if ((flags ^ dev->gflags) & IFF_PROMISC) {
4367 int inc = (flags & IFF_PROMISC) ? 1 : -1;
4369 dev->gflags ^= IFF_PROMISC;
4370 dev_set_promiscuity(dev, inc);
4373 /* NOTE: order of synchronization of IFF_PROMISC and IFF_ALLMULTI
4374 is important. Some (broken) drivers set IFF_PROMISC, when
4375 IFF_ALLMULTI is requested not asking us and not reporting.
4377 if ((flags ^ dev->gflags) & IFF_ALLMULTI) {
4378 int inc = (flags & IFF_ALLMULTI) ? 1 : -1;
4380 dev->gflags ^= IFF_ALLMULTI;
4381 dev_set_allmulti(dev, inc);
4387 void __dev_notify_flags(struct net_device *dev, unsigned int old_flags)
4389 unsigned int changes = dev->flags ^ old_flags;
4391 if (changes & IFF_UP) {
4392 if (dev->flags & IFF_UP)
4393 call_netdevice_notifiers(NETDEV_UP, dev);
4395 call_netdevice_notifiers(NETDEV_DOWN, dev);
4398 if (dev->flags & IFF_UP &&
4399 (changes & ~(IFF_UP | IFF_PROMISC | IFF_ALLMULTI | IFF_VOLATILE)))
4400 call_netdevice_notifiers(NETDEV_CHANGE, dev);
4404 * dev_change_flags - change device settings
4406 * @flags: device state flags
4408 * Change settings on device based state flags. The flags are
4409 * in the userspace exported format.
4411 int dev_change_flags(struct net_device *dev, unsigned flags)
4414 int old_flags = dev->flags;
4416 ret = __dev_change_flags(dev, flags);
4420 changes = old_flags ^ dev->flags;
4422 rtmsg_ifinfo(RTM_NEWLINK, dev, changes);
4424 __dev_notify_flags(dev, old_flags);
4427 EXPORT_SYMBOL(dev_change_flags);
4430 * dev_set_mtu - Change maximum transfer unit
4432 * @new_mtu: new transfer unit
4434 * Change the maximum transfer size of the network device.
4436 int dev_set_mtu(struct net_device *dev, int new_mtu)
4438 const struct net_device_ops *ops = dev->netdev_ops;
4441 if (new_mtu == dev->mtu)
4444 /* MTU must be positive. */
4448 if (!netif_device_present(dev))
4452 if (ops->ndo_change_mtu)
4453 err = ops->ndo_change_mtu(dev, new_mtu);
4457 if (!err && dev->flags & IFF_UP)
4458 call_netdevice_notifiers(NETDEV_CHANGEMTU, dev);
4461 EXPORT_SYMBOL(dev_set_mtu);
4464 * dev_set_mac_address - Change Media Access Control Address
4468 * Change the hardware (MAC) address of the device
4470 int dev_set_mac_address(struct net_device *dev, struct sockaddr *sa)
4472 const struct net_device_ops *ops = dev->netdev_ops;
4475 if (!ops->ndo_set_mac_address)
4477 if (sa->sa_family != dev->type)
4479 if (!netif_device_present(dev))
4481 err = ops->ndo_set_mac_address(dev, sa);
4483 call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
4486 EXPORT_SYMBOL(dev_set_mac_address);
4489 * Perform the SIOCxIFxxx calls, inside rcu_read_lock()
4491 static int dev_ifsioc_locked(struct net *net, struct ifreq *ifr, unsigned int cmd)
4494 struct net_device *dev = dev_get_by_name_rcu(net, ifr->ifr_name);
4500 case SIOCGIFFLAGS: /* Get interface flags */
4501 ifr->ifr_flags = (short) dev_get_flags(dev);
4504 case SIOCGIFMETRIC: /* Get the metric on the interface
4505 (currently unused) */
4506 ifr->ifr_metric = 0;
4509 case SIOCGIFMTU: /* Get the MTU of a device */
4510 ifr->ifr_mtu = dev->mtu;
4515 memset(ifr->ifr_hwaddr.sa_data, 0, sizeof ifr->ifr_hwaddr.sa_data);
4517 memcpy(ifr->ifr_hwaddr.sa_data, dev->dev_addr,
4518 min(sizeof ifr->ifr_hwaddr.sa_data, (size_t) dev->addr_len));
4519 ifr->ifr_hwaddr.sa_family = dev->type;
4527 ifr->ifr_map.mem_start = dev->mem_start;
4528 ifr->ifr_map.mem_end = dev->mem_end;
4529 ifr->ifr_map.base_addr = dev->base_addr;
4530 ifr->ifr_map.irq = dev->irq;
4531 ifr->ifr_map.dma = dev->dma;
4532 ifr->ifr_map.port = dev->if_port;
4536 ifr->ifr_ifindex = dev->ifindex;
4540 ifr->ifr_qlen = dev->tx_queue_len;
4544 /* dev_ioctl() should ensure this case
4556 * Perform the SIOCxIFxxx calls, inside rtnl_lock()
4558 static int dev_ifsioc(struct net *net, struct ifreq *ifr, unsigned int cmd)
4561 struct net_device *dev = __dev_get_by_name(net, ifr->ifr_name);
4562 const struct net_device_ops *ops;
4567 ops = dev->netdev_ops;
4570 case SIOCSIFFLAGS: /* Set interface flags */
4571 return dev_change_flags(dev, ifr->ifr_flags);
4573 case SIOCSIFMETRIC: /* Set the metric on the interface
4574 (currently unused) */
4577 case SIOCSIFMTU: /* Set the MTU of a device */
4578 return dev_set_mtu(dev, ifr->ifr_mtu);
4581 return dev_set_mac_address(dev, &ifr->ifr_hwaddr);
4583 case SIOCSIFHWBROADCAST:
4584 if (ifr->ifr_hwaddr.sa_family != dev->type)
4586 memcpy(dev->broadcast, ifr->ifr_hwaddr.sa_data,
4587 min(sizeof ifr->ifr_hwaddr.sa_data, (size_t) dev->addr_len));
4588 call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
4592 if (ops->ndo_set_config) {
4593 if (!netif_device_present(dev))
4595 return ops->ndo_set_config(dev, &ifr->ifr_map);
4600 if ((!ops->ndo_set_multicast_list && !ops->ndo_set_rx_mode) ||
4601 ifr->ifr_hwaddr.sa_family != AF_UNSPEC)
4603 if (!netif_device_present(dev))
4605 return dev_mc_add(dev, ifr->ifr_hwaddr.sa_data,
4609 if ((!ops->ndo_set_multicast_list && !ops->ndo_set_rx_mode) ||
4610 ifr->ifr_hwaddr.sa_family != AF_UNSPEC)
4612 if (!netif_device_present(dev))
4614 return dev_mc_delete(dev, ifr->ifr_hwaddr.sa_data,
4618 if (ifr->ifr_qlen < 0)
4620 dev->tx_queue_len = ifr->ifr_qlen;
4624 ifr->ifr_newname[IFNAMSIZ-1] = '\0';
4625 return dev_change_name(dev, ifr->ifr_newname);
4628 * Unknown or private ioctl
4631 if ((cmd >= SIOCDEVPRIVATE &&
4632 cmd <= SIOCDEVPRIVATE + 15) ||
4633 cmd == SIOCBONDENSLAVE ||
4634 cmd == SIOCBONDRELEASE ||
4635 cmd == SIOCBONDSETHWADDR ||
4636 cmd == SIOCBONDSLAVEINFOQUERY ||
4637 cmd == SIOCBONDINFOQUERY ||
4638 cmd == SIOCBONDCHANGEACTIVE ||
4639 cmd == SIOCGMIIPHY ||
4640 cmd == SIOCGMIIREG ||
4641 cmd == SIOCSMIIREG ||
4642 cmd == SIOCBRADDIF ||
4643 cmd == SIOCBRDELIF ||
4644 cmd == SIOCSHWTSTAMP ||
4645 cmd == SIOCWANDEV) {
4647 if (ops->ndo_do_ioctl) {
4648 if (netif_device_present(dev))
4649 err = ops->ndo_do_ioctl(dev, ifr, cmd);
4661 * This function handles all "interface"-type I/O control requests. The actual
4662 * 'doing' part of this is dev_ifsioc above.
4666 * dev_ioctl - network device ioctl
4667 * @net: the applicable net namespace
4668 * @cmd: command to issue
4669 * @arg: pointer to a struct ifreq in user space
4671 * Issue ioctl functions to devices. This is normally called by the
4672 * user space syscall interfaces but can sometimes be useful for
4673 * other purposes. The return value is the return from the syscall if
4674 * positive or a negative errno code on error.
4677 int dev_ioctl(struct net *net, unsigned int cmd, void __user *arg)
4683 /* One special case: SIOCGIFCONF takes ifconf argument
4684 and requires shared lock, because it sleeps writing
4688 if (cmd == SIOCGIFCONF) {
4690 ret = dev_ifconf(net, (char __user *) arg);
4694 if (cmd == SIOCGIFNAME)
4695 return dev_ifname(net, (struct ifreq __user *)arg);
4697 if (copy_from_user(&ifr, arg, sizeof(struct ifreq)))
4700 ifr.ifr_name[IFNAMSIZ-1] = 0;
4702 colon = strchr(ifr.ifr_name, ':');
4707 * See which interface the caller is talking about.
4712 * These ioctl calls:
4713 * - can be done by all.
4714 * - atomic and do not require locking.
4725 dev_load(net, ifr.ifr_name);
4727 ret = dev_ifsioc_locked(net, &ifr, cmd);
4732 if (copy_to_user(arg, &ifr,
4733 sizeof(struct ifreq)))
4739 dev_load(net, ifr.ifr_name);
4741 ret = dev_ethtool(net, &ifr);
4746 if (copy_to_user(arg, &ifr,
4747 sizeof(struct ifreq)))
4753 * These ioctl calls:
4754 * - require superuser power.
4755 * - require strict serialization.
4761 if (!capable(CAP_NET_ADMIN))
4763 dev_load(net, ifr.ifr_name);
4765 ret = dev_ifsioc(net, &ifr, cmd);
4770 if (copy_to_user(arg, &ifr,
4771 sizeof(struct ifreq)))
4777 * These ioctl calls:
4778 * - require superuser power.
4779 * - require strict serialization.
4780 * - do not return a value
4790 case SIOCSIFHWBROADCAST:
4793 case SIOCBONDENSLAVE:
4794 case SIOCBONDRELEASE:
4795 case SIOCBONDSETHWADDR:
4796 case SIOCBONDCHANGEACTIVE:
4800 if (!capable(CAP_NET_ADMIN))
4803 case SIOCBONDSLAVEINFOQUERY:
4804 case SIOCBONDINFOQUERY:
4805 dev_load(net, ifr.ifr_name);
4807 ret = dev_ifsioc(net, &ifr, cmd);
4812 /* Get the per device memory space. We can add this but
4813 * currently do not support it */
4815 /* Set the per device memory buffer space.
4816 * Not applicable in our case */
4821 * Unknown or private ioctl.
4824 if (cmd == SIOCWANDEV ||
4825 (cmd >= SIOCDEVPRIVATE &&
4826 cmd <= SIOCDEVPRIVATE + 15)) {
4827 dev_load(net, ifr.ifr_name);
4829 ret = dev_ifsioc(net, &ifr, cmd);
4831 if (!ret && copy_to_user(arg, &ifr,
4832 sizeof(struct ifreq)))
4836 /* Take care of Wireless Extensions */
4837 if (cmd >= SIOCIWFIRST && cmd <= SIOCIWLAST)
4838 return wext_handle_ioctl(net, &ifr, cmd, arg);
4845 * dev_new_index - allocate an ifindex
4846 * @net: the applicable net namespace
4848 * Returns a suitable unique value for a new device interface
4849 * number. The caller must hold the rtnl semaphore or the
4850 * dev_base_lock to be sure it remains unique.
4852 static int dev_new_index(struct net *net)
4858 if (!__dev_get_by_index(net, ifindex))
4863 /* Delayed registration/unregisteration */
4864 static LIST_HEAD(net_todo_list);
4866 static void net_set_todo(struct net_device *dev)
4868 list_add_tail(&dev->todo_list, &net_todo_list);
4871 static void rollback_registered_many(struct list_head *head)
4873 struct net_device *dev, *tmp;
4875 BUG_ON(dev_boot_phase);
4878 list_for_each_entry_safe(dev, tmp, head, unreg_list) {
4879 /* Some devices call without registering
4880 * for initialization unwind. Remove those
4881 * devices and proceed with the remaining.
4883 if (dev->reg_state == NETREG_UNINITIALIZED) {
4884 pr_debug("unregister_netdevice: device %s/%p never "
4885 "was registered\n", dev->name, dev);
4888 list_del(&dev->unreg_list);
4892 BUG_ON(dev->reg_state != NETREG_REGISTERED);
4894 /* If device is running, close it first. */
4897 /* And unlink it from device chain. */
4898 unlist_netdevice(dev);
4900 dev->reg_state = NETREG_UNREGISTERING;
4905 list_for_each_entry(dev, head, unreg_list) {
4906 /* Shutdown queueing discipline. */
4910 /* Notify protocols, that we are about to destroy
4911 this device. They should clean all the things.
4913 call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
4915 if (!dev->rtnl_link_ops ||
4916 dev->rtnl_link_state == RTNL_LINK_INITIALIZED)
4917 rtmsg_ifinfo(RTM_DELLINK, dev, ~0U);
4920 * Flush the unicast and multicast chains
4922 dev_unicast_flush(dev);
4923 dev_addr_discard(dev);
4925 if (dev->netdev_ops->ndo_uninit)
4926 dev->netdev_ops->ndo_uninit(dev);
4928 /* Notifier chain MUST detach us from master device. */
4929 WARN_ON(dev->master);
4931 /* Remove entries from kobject tree */
4932 netdev_unregister_kobject(dev);
4935 /* Process any work delayed until the end of the batch */
4936 dev = list_first_entry(head, struct net_device, unreg_list);
4937 call_netdevice_notifiers(NETDEV_UNREGISTER_BATCH, dev);
4941 list_for_each_entry(dev, head, unreg_list)
4945 static void rollback_registered(struct net_device *dev)
4949 list_add(&dev->unreg_list, &single);
4950 rollback_registered_many(&single);
4953 static void __netdev_init_queue_locks_one(struct net_device *dev,
4954 struct netdev_queue *dev_queue,
4957 spin_lock_init(&dev_queue->_xmit_lock);
4958 netdev_set_xmit_lockdep_class(&dev_queue->_xmit_lock, dev->type);
4959 dev_queue->xmit_lock_owner = -1;
4962 static void netdev_init_queue_locks(struct net_device *dev)
4964 netdev_for_each_tx_queue(dev, __netdev_init_queue_locks_one, NULL);
4965 __netdev_init_queue_locks_one(dev, &dev->rx_queue, NULL);
4968 unsigned long netdev_fix_features(unsigned long features, const char *name)
4970 /* Fix illegal SG+CSUM combinations. */
4971 if ((features & NETIF_F_SG) &&
4972 !(features & NETIF_F_ALL_CSUM)) {
4974 printk(KERN_NOTICE "%s: Dropping NETIF_F_SG since no "
4975 "checksum feature.\n", name);
4976 features &= ~NETIF_F_SG;
4979 /* TSO requires that SG is present as well. */
4980 if ((features & NETIF_F_TSO) && !(features & NETIF_F_SG)) {
4982 printk(KERN_NOTICE "%s: Dropping NETIF_F_TSO since no "
4983 "SG feature.\n", name);
4984 features &= ~NETIF_F_TSO;
4987 if (features & NETIF_F_UFO) {
4988 if (!(features & NETIF_F_GEN_CSUM)) {
4990 printk(KERN_ERR "%s: Dropping NETIF_F_UFO "
4991 "since no NETIF_F_HW_CSUM feature.\n",
4993 features &= ~NETIF_F_UFO;
4996 if (!(features & NETIF_F_SG)) {
4998 printk(KERN_ERR "%s: Dropping NETIF_F_UFO "
4999 "since no NETIF_F_SG feature.\n", name);
5000 features &= ~NETIF_F_UFO;
5006 EXPORT_SYMBOL(netdev_fix_features);
5009 * netif_stacked_transfer_operstate - transfer operstate
5010 * @rootdev: the root or lower level device to transfer state from
5011 * @dev: the device to transfer operstate to
5013 * Transfer operational state from root to device. This is normally
5014 * called when a stacking relationship exists between the root
5015 * device and the device(a leaf device).
5017 void netif_stacked_transfer_operstate(const struct net_device *rootdev,
5018 struct net_device *dev)
5020 if (rootdev->operstate == IF_OPER_DORMANT)
5021 netif_dormant_on(dev);
5023 netif_dormant_off(dev);
5025 if (netif_carrier_ok(rootdev)) {
5026 if (!netif_carrier_ok(dev))
5027 netif_carrier_on(dev);
5029 if (netif_carrier_ok(dev))
5030 netif_carrier_off(dev);
5033 EXPORT_SYMBOL(netif_stacked_transfer_operstate);
5036 * register_netdevice - register a network device
5037 * @dev: device to register
5039 * Take a completed network device structure and add it to the kernel
5040 * interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
5041 * chain. 0 is returned on success. A negative errno code is returned
5042 * on a failure to set up the device, or if the name is a duplicate.
5044 * Callers must hold the rtnl semaphore. You may want
5045 * register_netdev() instead of this.
5048 * The locking appears insufficient to guarantee two parallel registers
5049 * will not get the same name.
5052 int register_netdevice(struct net_device *dev)
5055 struct net *net = dev_net(dev);
5057 BUG_ON(dev_boot_phase);
5062 /* When net_device's are persistent, this will be fatal. */
5063 BUG_ON(dev->reg_state != NETREG_UNINITIALIZED);
5066 spin_lock_init(&dev->addr_list_lock);
5067 netdev_set_addr_lockdep_class(dev);
5068 netdev_init_queue_locks(dev);
5072 /* Init, if this function is available */
5073 if (dev->netdev_ops->ndo_init) {
5074 ret = dev->netdev_ops->ndo_init(dev);
5082 ret = dev_get_valid_name(net, dev->name, dev->name, 0);
5086 dev->ifindex = dev_new_index(net);
5087 if (dev->iflink == -1)
5088 dev->iflink = dev->ifindex;
5090 /* Fix illegal checksum combinations */
5091 if ((dev->features & NETIF_F_HW_CSUM) &&
5092 (dev->features & (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
5093 printk(KERN_NOTICE "%s: mixed HW and IP checksum settings.\n",
5095 dev->features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM);
5098 if ((dev->features & NETIF_F_NO_CSUM) &&
5099 (dev->features & (NETIF_F_HW_CSUM|NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
5100 printk(KERN_NOTICE "%s: mixed no checksumming and other settings.\n",
5102 dev->features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM|NETIF_F_HW_CSUM);
5105 dev->features = netdev_fix_features(dev->features, dev->name);
5107 /* Enable software GSO if SG is supported. */
5108 if (dev->features & NETIF_F_SG)
5109 dev->features |= NETIF_F_GSO;
5111 netdev_initialize_kobject(dev);
5113 ret = call_netdevice_notifiers(NETDEV_POST_INIT, dev);
5114 ret = notifier_to_errno(ret);
5118 ret = netdev_register_kobject(dev);
5121 dev->reg_state = NETREG_REGISTERED;
5124 * Default initial state at registry is that the
5125 * device is present.
5128 set_bit(__LINK_STATE_PRESENT, &dev->state);
5130 dev_init_scheduler(dev);
5132 list_netdevice(dev);
5134 /* Notify protocols, that a new device appeared. */
5135 ret = call_netdevice_notifiers(NETDEV_REGISTER, dev);
5136 ret = notifier_to_errno(ret);
5138 rollback_registered(dev);
5139 dev->reg_state = NETREG_UNREGISTERED;
5142 * Prevent userspace races by waiting until the network
5143 * device is fully setup before sending notifications.
5145 if (!dev->rtnl_link_ops ||
5146 dev->rtnl_link_state == RTNL_LINK_INITIALIZED)
5147 rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U);
5153 if (dev->netdev_ops->ndo_uninit)
5154 dev->netdev_ops->ndo_uninit(dev);
5157 EXPORT_SYMBOL(register_netdevice);
5160 * init_dummy_netdev - init a dummy network device for NAPI
5161 * @dev: device to init
5163 * This takes a network device structure and initialize the minimum
5164 * amount of fields so it can be used to schedule NAPI polls without
5165 * registering a full blown interface. This is to be used by drivers
5166 * that need to tie several hardware interfaces to a single NAPI
5167 * poll scheduler due to HW limitations.
5169 int init_dummy_netdev(struct net_device *dev)
5171 /* Clear everything. Note we don't initialize spinlocks
5172 * are they aren't supposed to be taken by any of the
5173 * NAPI code and this dummy netdev is supposed to be
5174 * only ever used for NAPI polls
5176 memset(dev, 0, sizeof(struct net_device));
5178 /* make sure we BUG if trying to hit standard
5179 * register/unregister code path
5181 dev->reg_state = NETREG_DUMMY;
5183 /* initialize the ref count */
5184 atomic_set(&dev->refcnt, 1);
5186 /* NAPI wants this */
5187 INIT_LIST_HEAD(&dev->napi_list);
5189 /* a dummy interface is started by default */
5190 set_bit(__LINK_STATE_PRESENT, &dev->state);
5191 set_bit(__LINK_STATE_START, &dev->state);
5195 EXPORT_SYMBOL_GPL(init_dummy_netdev);
5199 * register_netdev - register a network device
5200 * @dev: device to register
5202 * Take a completed network device structure and add it to the kernel
5203 * interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
5204 * chain. 0 is returned on success. A negative errno code is returned
5205 * on a failure to set up the device, or if the name is a duplicate.
5207 * This is a wrapper around register_netdevice that takes the rtnl semaphore
5208 * and expands the device name if you passed a format string to
5211 int register_netdev(struct net_device *dev)
5218 * If the name is a format string the caller wants us to do a
5221 if (strchr(dev->name, '%')) {
5222 err = dev_alloc_name(dev, dev->name);
5227 err = register_netdevice(dev);
5232 EXPORT_SYMBOL(register_netdev);
5235 * netdev_wait_allrefs - wait until all references are gone.
5237 * This is called when unregistering network devices.
5239 * Any protocol or device that holds a reference should register
5240 * for netdevice notification, and cleanup and put back the
5241 * reference if they receive an UNREGISTER event.
5242 * We can get stuck here if buggy protocols don't correctly
5245 static void netdev_wait_allrefs(struct net_device *dev)
5247 unsigned long rebroadcast_time, warning_time;
5249 linkwatch_forget_dev(dev);
5251 rebroadcast_time = warning_time = jiffies;
5252 while (atomic_read(&dev->refcnt) != 0) {
5253 if (time_after(jiffies, rebroadcast_time + 1 * HZ)) {
5256 /* Rebroadcast unregister notification */
5257 call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
5258 /* don't resend NETDEV_UNREGISTER_BATCH, _BATCH users
5259 * should have already handle it the first time */
5261 if (test_bit(__LINK_STATE_LINKWATCH_PENDING,
5263 /* We must not have linkwatch events
5264 * pending on unregister. If this
5265 * happens, we simply run the queue
5266 * unscheduled, resulting in a noop
5269 linkwatch_run_queue();
5274 rebroadcast_time = jiffies;
5279 if (time_after(jiffies, warning_time + 10 * HZ)) {
5280 printk(KERN_EMERG "unregister_netdevice: "
5281 "waiting for %s to become free. Usage "
5283 dev->name, atomic_read(&dev->refcnt));
5284 warning_time = jiffies;
5293 * register_netdevice(x1);
5294 * register_netdevice(x2);
5296 * unregister_netdevice(y1);
5297 * unregister_netdevice(y2);
5303 * We are invoked by rtnl_unlock().
5304 * This allows us to deal with problems:
5305 * 1) We can delete sysfs objects which invoke hotplug
5306 * without deadlocking with linkwatch via keventd.
5307 * 2) Since we run with the RTNL semaphore not held, we can sleep
5308 * safely in order to wait for the netdev refcnt to drop to zero.
5310 * We must not return until all unregister events added during
5311 * the interval the lock was held have been completed.
5313 void netdev_run_todo(void)
5315 struct list_head list;
5317 /* Snapshot list, allow later requests */
5318 list_replace_init(&net_todo_list, &list);
5322 while (!list_empty(&list)) {
5323 struct net_device *dev
5324 = list_first_entry(&list, struct net_device, todo_list);
5325 list_del(&dev->todo_list);
5327 if (unlikely(dev->reg_state != NETREG_UNREGISTERING)) {
5328 printk(KERN_ERR "network todo '%s' but state %d\n",
5329 dev->name, dev->reg_state);
5334 dev->reg_state = NETREG_UNREGISTERED;
5336 on_each_cpu(flush_backlog, dev, 1);
5338 netdev_wait_allrefs(dev);
5341 BUG_ON(atomic_read(&dev->refcnt));
5342 WARN_ON(dev->ip_ptr);
5343 WARN_ON(dev->ip6_ptr);
5344 WARN_ON(dev->dn_ptr);
5346 if (dev->destructor)
5347 dev->destructor(dev);
5349 /* Free network device */
5350 kobject_put(&dev->dev.kobj);
5355 * dev_txq_stats_fold - fold tx_queues stats
5356 * @dev: device to get statistics from
5357 * @stats: struct net_device_stats to hold results
5359 void dev_txq_stats_fold(const struct net_device *dev,
5360 struct net_device_stats *stats)
5362 unsigned long tx_bytes = 0, tx_packets = 0, tx_dropped = 0;
5364 struct netdev_queue *txq;
5366 for (i = 0; i < dev->num_tx_queues; i++) {
5367 txq = netdev_get_tx_queue(dev, i);
5368 tx_bytes += txq->tx_bytes;
5369 tx_packets += txq->tx_packets;
5370 tx_dropped += txq->tx_dropped;
5372 if (tx_bytes || tx_packets || tx_dropped) {
5373 stats->tx_bytes = tx_bytes;
5374 stats->tx_packets = tx_packets;
5375 stats->tx_dropped = tx_dropped;
5378 EXPORT_SYMBOL(dev_txq_stats_fold);
5381 * dev_get_stats - get network device statistics
5382 * @dev: device to get statistics from
5384 * Get network statistics from device. The device driver may provide
5385 * its own method by setting dev->netdev_ops->get_stats; otherwise
5386 * the internal statistics structure is used.
5388 const struct net_device_stats *dev_get_stats(struct net_device *dev)
5390 const struct net_device_ops *ops = dev->netdev_ops;
5392 if (ops->ndo_get_stats)
5393 return ops->ndo_get_stats(dev);
5395 dev_txq_stats_fold(dev, &dev->stats);
5398 EXPORT_SYMBOL(dev_get_stats);
5400 static void netdev_init_one_queue(struct net_device *dev,
5401 struct netdev_queue *queue,
5407 static void netdev_init_queues(struct net_device *dev)
5409 netdev_init_one_queue(dev, &dev->rx_queue, NULL);
5410 netdev_for_each_tx_queue(dev, netdev_init_one_queue, NULL);
5411 spin_lock_init(&dev->tx_global_lock);
5415 * alloc_netdev_mq - allocate network device
5416 * @sizeof_priv: size of private data to allocate space for
5417 * @name: device name format string
5418 * @setup: callback to initialize device
5419 * @queue_count: the number of subqueues to allocate
5421 * Allocates a struct net_device with private data area for driver use
5422 * and performs basic initialization. Also allocates subquue structs
5423 * for each queue on the device at the end of the netdevice.
5425 struct net_device *alloc_netdev_mq(int sizeof_priv, const char *name,
5426 void (*setup)(struct net_device *), unsigned int queue_count)
5428 struct netdev_queue *tx;
5429 struct net_device *dev;
5431 struct net_device *p;
5433 BUG_ON(strlen(name) >= sizeof(dev->name));
5435 alloc_size = sizeof(struct net_device);
5437 /* ensure 32-byte alignment of private area */
5438 alloc_size = ALIGN(alloc_size, NETDEV_ALIGN);
5439 alloc_size += sizeof_priv;
5441 /* ensure 32-byte alignment of whole construct */
5442 alloc_size += NETDEV_ALIGN - 1;
5444 p = kzalloc(alloc_size, GFP_KERNEL);
5446 printk(KERN_ERR "alloc_netdev: Unable to allocate device.\n");
5450 tx = kcalloc(queue_count, sizeof(struct netdev_queue), GFP_KERNEL);
5452 printk(KERN_ERR "alloc_netdev: Unable to allocate "
5457 dev = PTR_ALIGN(p, NETDEV_ALIGN);
5458 dev->padded = (char *)dev - (char *)p;
5460 if (dev_addr_init(dev))
5463 dev_unicast_init(dev);
5465 dev_net_set(dev, &init_net);
5468 dev->num_tx_queues = queue_count;
5469 dev->real_num_tx_queues = queue_count;
5471 dev->gso_max_size = GSO_MAX_SIZE;
5473 netdev_init_queues(dev);
5475 INIT_LIST_HEAD(&dev->ethtool_ntuple_list.list);
5476 dev->ethtool_ntuple_list.count = 0;
5477 INIT_LIST_HEAD(&dev->napi_list);
5478 INIT_LIST_HEAD(&dev->unreg_list);
5479 INIT_LIST_HEAD(&dev->link_watch_list);
5480 dev->priv_flags = IFF_XMIT_DST_RELEASE;
5482 strcpy(dev->name, name);
5492 EXPORT_SYMBOL(alloc_netdev_mq);
5495 * free_netdev - free network device
5498 * This function does the last stage of destroying an allocated device
5499 * interface. The reference to the device object is released.
5500 * If this is the last reference then it will be freed.
5502 void free_netdev(struct net_device *dev)
5504 struct napi_struct *p, *n;
5506 release_net(dev_net(dev));
5510 /* Flush device addresses */
5511 dev_addr_flush(dev);
5513 /* Clear ethtool n-tuple list */
5514 ethtool_ntuple_flush(dev);
5516 list_for_each_entry_safe(p, n, &dev->napi_list, dev_list)
5519 /* Compatibility with error handling in drivers */
5520 if (dev->reg_state == NETREG_UNINITIALIZED) {
5521 kfree((char *)dev - dev->padded);
5525 BUG_ON(dev->reg_state != NETREG_UNREGISTERED);
5526 dev->reg_state = NETREG_RELEASED;
5528 /* will free via device release */
5529 put_device(&dev->dev);
5531 EXPORT_SYMBOL(free_netdev);
5534 * synchronize_net - Synchronize with packet receive processing
5536 * Wait for packets currently being received to be done.
5537 * Does not block later packets from starting.
5539 void synchronize_net(void)
5544 EXPORT_SYMBOL(synchronize_net);
5547 * unregister_netdevice_queue - remove device from the kernel
5551 * This function shuts down a device interface and removes it
5552 * from the kernel tables.
5553 * If head not NULL, device is queued to be unregistered later.
5555 * Callers must hold the rtnl semaphore. You may want
5556 * unregister_netdev() instead of this.
5559 void unregister_netdevice_queue(struct net_device *dev, struct list_head *head)
5564 list_move_tail(&dev->unreg_list, head);
5566 rollback_registered(dev);
5567 /* Finish processing unregister after unlock */
5571 EXPORT_SYMBOL(unregister_netdevice_queue);
5574 * unregister_netdevice_many - unregister many devices
5575 * @head: list of devices
5577 void unregister_netdevice_many(struct list_head *head)
5579 struct net_device *dev;
5581 if (!list_empty(head)) {
5582 rollback_registered_many(head);
5583 list_for_each_entry(dev, head, unreg_list)
5587 EXPORT_SYMBOL(unregister_netdevice_many);
5590 * unregister_netdev - remove device from the kernel
5593 * This function shuts down a device interface and removes it
5594 * from the kernel tables.
5596 * This is just a wrapper for unregister_netdevice that takes
5597 * the rtnl semaphore. In general you want to use this and not
5598 * unregister_netdevice.
5600 void unregister_netdev(struct net_device *dev)
5603 unregister_netdevice(dev);
5606 EXPORT_SYMBOL(unregister_netdev);
5609 * dev_change_net_namespace - move device to different nethost namespace
5611 * @net: network namespace
5612 * @pat: If not NULL name pattern to try if the current device name
5613 * is already taken in the destination network namespace.
5615 * This function shuts down a device interface and moves it
5616 * to a new network namespace. On success 0 is returned, on
5617 * a failure a netagive errno code is returned.
5619 * Callers must hold the rtnl semaphore.
5622 int dev_change_net_namespace(struct net_device *dev, struct net *net, const char *pat)
5628 /* Don't allow namespace local devices to be moved. */
5630 if (dev->features & NETIF_F_NETNS_LOCAL)
5634 /* Don't allow real devices to be moved when sysfs
5638 if (dev->dev.parent)
5642 /* Ensure the device has been registrered */
5644 if (dev->reg_state != NETREG_REGISTERED)
5647 /* Get out if there is nothing todo */
5649 if (net_eq(dev_net(dev), net))
5652 /* Pick the destination device name, and ensure
5653 * we can use it in the destination network namespace.
5656 if (__dev_get_by_name(net, dev->name)) {
5657 /* We get here if we can't use the current device name */
5660 if (dev_get_valid_name(net, pat, dev->name, 1))
5665 * And now a mini version of register_netdevice unregister_netdevice.
5668 /* If device is running close it first. */
5671 /* And unlink it from device chain */
5673 unlist_netdevice(dev);
5677 /* Shutdown queueing discipline. */
5680 /* Notify protocols, that we are about to destroy
5681 this device. They should clean all the things.
5683 call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
5684 call_netdevice_notifiers(NETDEV_UNREGISTER_BATCH, dev);
5687 * Flush the unicast and multicast chains
5689 dev_unicast_flush(dev);
5690 dev_addr_discard(dev);
5692 netdev_unregister_kobject(dev);
5694 /* Actually switch the network namespace */
5695 dev_net_set(dev, net);
5697 /* If there is an ifindex conflict assign a new one */
5698 if (__dev_get_by_index(net, dev->ifindex)) {
5699 int iflink = (dev->iflink == dev->ifindex);
5700 dev->ifindex = dev_new_index(net);
5702 dev->iflink = dev->ifindex;
5705 /* Fixup kobjects */
5706 err = netdev_register_kobject(dev);
5709 /* Add the device back in the hashes */
5710 list_netdevice(dev);
5712 /* Notify protocols, that a new device appeared. */
5713 call_netdevice_notifiers(NETDEV_REGISTER, dev);
5716 * Prevent userspace races by waiting until the network
5717 * device is fully setup before sending notifications.
5719 rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U);
5726 EXPORT_SYMBOL_GPL(dev_change_net_namespace);
5728 static int dev_cpu_callback(struct notifier_block *nfb,
5729 unsigned long action,
5732 struct sk_buff **list_skb;
5733 struct Qdisc **list_net;
5734 struct sk_buff *skb;
5735 unsigned int cpu, oldcpu = (unsigned long)ocpu;
5736 struct softnet_data *sd, *oldsd;
5738 if (action != CPU_DEAD && action != CPU_DEAD_FROZEN)
5741 local_irq_disable();
5742 cpu = smp_processor_id();
5743 sd = &per_cpu(softnet_data, cpu);
5744 oldsd = &per_cpu(softnet_data, oldcpu);
5746 /* Find end of our completion_queue. */
5747 list_skb = &sd->completion_queue;
5749 list_skb = &(*list_skb)->next;
5750 /* Append completion queue from offline CPU. */
5751 *list_skb = oldsd->completion_queue;
5752 oldsd->completion_queue = NULL;
5754 /* Find end of our output_queue. */
5755 list_net = &sd->output_queue;
5757 list_net = &(*list_net)->next_sched;
5758 /* Append output queue from offline CPU. */
5759 *list_net = oldsd->output_queue;
5760 oldsd->output_queue = NULL;
5762 raise_softirq_irqoff(NET_TX_SOFTIRQ);
5765 /* Process offline CPU's input_pkt_queue */
5766 while ((skb = __skb_dequeue(&oldsd->input_pkt_queue)))
5774 * netdev_increment_features - increment feature set by one
5775 * @all: current feature set
5776 * @one: new feature set
5777 * @mask: mask feature set
5779 * Computes a new feature set after adding a device with feature set
5780 * @one to the master device with current feature set @all. Will not
5781 * enable anything that is off in @mask. Returns the new feature set.
5783 unsigned long netdev_increment_features(unsigned long all, unsigned long one,
5786 /* If device needs checksumming, downgrade to it. */
5787 if (all & NETIF_F_NO_CSUM && !(one & NETIF_F_NO_CSUM))
5788 all ^= NETIF_F_NO_CSUM | (one & NETIF_F_ALL_CSUM);
5789 else if (mask & NETIF_F_ALL_CSUM) {
5790 /* If one device supports v4/v6 checksumming, set for all. */
5791 if (one & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM) &&
5792 !(all & NETIF_F_GEN_CSUM)) {
5793 all &= ~NETIF_F_ALL_CSUM;
5794 all |= one & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM);
5797 /* If one device supports hw checksumming, set for all. */
5798 if (one & NETIF_F_GEN_CSUM && !(all & NETIF_F_GEN_CSUM)) {
5799 all &= ~NETIF_F_ALL_CSUM;
5800 all |= NETIF_F_HW_CSUM;
5804 one |= NETIF_F_ALL_CSUM;
5806 one |= all & NETIF_F_ONE_FOR_ALL;
5807 all &= one | NETIF_F_LLTX | NETIF_F_GSO | NETIF_F_UFO;
5808 all |= one & mask & NETIF_F_ONE_FOR_ALL;
5812 EXPORT_SYMBOL(netdev_increment_features);
5814 static struct hlist_head *netdev_create_hash(void)
5817 struct hlist_head *hash;
5819 hash = kmalloc(sizeof(*hash) * NETDEV_HASHENTRIES, GFP_KERNEL);
5821 for (i = 0; i < NETDEV_HASHENTRIES; i++)
5822 INIT_HLIST_HEAD(&hash[i]);
5827 /* Initialize per network namespace state */
5828 static int __net_init netdev_init(struct net *net)
5830 INIT_LIST_HEAD(&net->dev_base_head);
5832 net->dev_name_head = netdev_create_hash();
5833 if (net->dev_name_head == NULL)
5836 net->dev_index_head = netdev_create_hash();
5837 if (net->dev_index_head == NULL)
5843 kfree(net->dev_name_head);
5849 * netdev_drivername - network driver for the device
5850 * @dev: network device
5851 * @buffer: buffer for resulting name
5852 * @len: size of buffer
5854 * Determine network driver for device.
5856 char *netdev_drivername(const struct net_device *dev, char *buffer, int len)
5858 const struct device_driver *driver;
5859 const struct device *parent;
5861 if (len <= 0 || !buffer)
5865 parent = dev->dev.parent;
5870 driver = parent->driver;
5871 if (driver && driver->name)
5872 strlcpy(buffer, driver->name, len);
5876 static void __net_exit netdev_exit(struct net *net)
5878 kfree(net->dev_name_head);
5879 kfree(net->dev_index_head);
5882 static struct pernet_operations __net_initdata netdev_net_ops = {
5883 .init = netdev_init,
5884 .exit = netdev_exit,
5887 static void __net_exit default_device_exit(struct net *net)
5889 struct net_device *dev, *aux;
5891 * Push all migratable network devices back to the
5892 * initial network namespace
5895 for_each_netdev_safe(net, dev, aux) {
5897 char fb_name[IFNAMSIZ];
5899 /* Ignore unmoveable devices (i.e. loopback) */
5900 if (dev->features & NETIF_F_NETNS_LOCAL)
5903 /* Leave virtual devices for the generic cleanup */
5904 if (dev->rtnl_link_ops)
5907 /* Push remaing network devices to init_net */
5908 snprintf(fb_name, IFNAMSIZ, "dev%d", dev->ifindex);
5909 err = dev_change_net_namespace(dev, &init_net, fb_name);
5911 printk(KERN_EMERG "%s: failed to move %s to init_net: %d\n",
5912 __func__, dev->name, err);
5919 static void __net_exit default_device_exit_batch(struct list_head *net_list)
5921 /* At exit all network devices most be removed from a network
5922 * namespace. Do this in the reverse order of registeration.
5923 * Do this across as many network namespaces as possible to
5924 * improve batching efficiency.
5926 struct net_device *dev;
5928 LIST_HEAD(dev_kill_list);
5931 list_for_each_entry(net, net_list, exit_list) {
5932 for_each_netdev_reverse(net, dev) {
5933 if (dev->rtnl_link_ops)
5934 dev->rtnl_link_ops->dellink(dev, &dev_kill_list);
5936 unregister_netdevice_queue(dev, &dev_kill_list);
5939 unregister_netdevice_many(&dev_kill_list);
5943 static struct pernet_operations __net_initdata default_device_ops = {
5944 .exit = default_device_exit,
5945 .exit_batch = default_device_exit_batch,
5949 * Initialize the DEV module. At boot time this walks the device list and
5950 * unhooks any devices that fail to initialise (normally hardware not
5951 * present) and leaves us with a valid list of present and active devices.
5956 * This is called single threaded during boot, so no need
5957 * to take the rtnl semaphore.
5959 static int __init net_dev_init(void)
5961 int i, rc = -ENOMEM;
5963 BUG_ON(!dev_boot_phase);
5965 if (dev_proc_init())
5968 if (netdev_kobject_init())
5971 INIT_LIST_HEAD(&ptype_all);
5972 for (i = 0; i < PTYPE_HASH_SIZE; i++)
5973 INIT_LIST_HEAD(&ptype_base[i]);
5975 if (register_pernet_subsys(&netdev_net_ops))
5979 * Initialise the packet receive queues.
5982 for_each_possible_cpu(i) {
5983 struct softnet_data *queue;
5985 queue = &per_cpu(softnet_data, i);
5986 skb_queue_head_init(&queue->input_pkt_queue);
5987 queue->completion_queue = NULL;
5988 INIT_LIST_HEAD(&queue->poll_list);
5990 queue->backlog.poll = process_backlog;
5991 queue->backlog.weight = weight_p;
5992 queue->backlog.gro_list = NULL;
5993 queue->backlog.gro_count = 0;
5998 /* The loopback device is special if any other network devices
5999 * is present in a network namespace the loopback device must
6000 * be present. Since we now dynamically allocate and free the
6001 * loopback device ensure this invariant is maintained by
6002 * keeping the loopback device as the first device on the
6003 * list of network devices. Ensuring the loopback devices
6004 * is the first device that appears and the last network device
6007 if (register_pernet_device(&loopback_net_ops))
6010 if (register_pernet_device(&default_device_ops))
6013 open_softirq(NET_TX_SOFTIRQ, net_tx_action);
6014 open_softirq(NET_RX_SOFTIRQ, net_rx_action);
6016 hotcpu_notifier(dev_cpu_callback, 0);
6024 subsys_initcall(net_dev_init);
6026 static int __init initialize_hashrnd(void)
6028 get_random_bytes(&skb_tx_hashrnd, sizeof(skb_tx_hashrnd));
6032 late_initcall_sync(initialize_hashrnd);