/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system. INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Definitions for the Interfaces handler.
 *
 * Version:	@(#)dev.h	1.0.10	08/12/93
 *
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Corey Minyard <wf-rch!minyard@relay.EU.net>
 *		Donald J. Becker, <becker@cesdis.gsfc.nasa.gov>
 *		Alan Cox, <alan@lxorguk.ukuu.org.uk>
 *		Bjorn Ekwall, <bj0rn@blox.se>
 *		Pekka Riikonen <priikone@poseidon.pspt.fi>
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 *		Moved to /usr/include/linux for NET3
 */
#ifndef _LINUX_NETDEVICE_H
#define _LINUX_NETDEVICE_H

#include <linux/pm_qos.h>
#include <linux/timer.h>
#include <linux/bug.h>
#include <linux/delay.h>
#include <linux/atomic.h>
#include <linux/prefetch.h>
#include <asm/cache.h>
#include <asm/byteorder.h>

#include <linux/percpu.h>
#include <linux/rculist.h>
#include <linux/dmaengine.h>
#include <linux/workqueue.h>
#include <linux/dynamic_queue_limits.h>

#include <linux/ethtool.h>
#include <net/net_namespace.h>
#include <net/dcbnl.h>
#include <net/netprio_cgroup.h>

#include <linux/netdev_features.h>
#include <linux/neighbour.h>
#include <uapi/linux/netdevice.h>
/* 802.15.4 specific */

void netdev_set_default_ethtool_ops(struct net_device *dev,
				    const struct ethtool_ops *ops);

/* Backlog congestion levels */
#define NET_RX_SUCCESS	0	/* keep 'em coming, baby */
#define NET_RX_DROP	1	/* packet dropped */
/*
 * Transmit return codes: transmit return codes originate from three different
 * namespaces:
 * - qdisc return codes
 * - driver transmit return codes
 * - errno values
 *
 * Drivers are allowed to return any one of those in their hard_start_xmit()
 * function. Real network devices commonly used with qdiscs should only return
 * the driver transmit return codes though - when qdiscs are used, the actual
 * transmission happens asynchronously, so the value is not propagated to
 * higher layers. Virtual network devices transmit synchronously; in this case
 * the driver transmit return codes are consumed by dev_queue_xmit(), and all
 * others are propagated to higher layers.
 */

/* qdisc ->enqueue() return codes. */
#define NET_XMIT_SUCCESS	0x00
#define NET_XMIT_DROP		0x01	/* skb dropped */
#define NET_XMIT_CN		0x02	/* congestion notification */
#define NET_XMIT_POLICED	0x03	/* skb is shot by police */
#define NET_XMIT_MASK		0x0f	/* qdisc flags in net/sch_generic.h */

/* NET_XMIT_CN is special. It does not guarantee that this packet is lost. It
 * indicates that the device will soon be dropping packets, or already drops
 * some packets of the same priority; prompting us to send less aggressively. */
#define net_xmit_eval(e)	((e) == NET_XMIT_CN ? 0 : (e))
#define net_xmit_errno(e)	((e) != NET_XMIT_CN ? -ENOBUFS : 0)
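/*
 * Illustration only (not part of the original header): a hypothetical
 * caller can use net_xmit_eval() to treat NET_XMIT_CN as success while
 * still propagating real errors:
 *
 *	static int example_xmit(struct sk_buff *skb)
 *	{
 *		int rc = dev_queue_xmit(skb);
 *
 *		// NET_XMIT_CN maps to 0; other return codes pass through
 *		return net_xmit_eval(rc);
 *	}
 */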
/* Driver transmit return codes */
#define NETDEV_TX_MASK		0xf0

enum netdev_tx {
	__NETDEV_TX_MIN	 = INT_MIN,	/* make sure enum is signed */
	NETDEV_TX_OK	 = 0x00,	/* driver took care of packet */
	NETDEV_TX_BUSY	 = 0x10,	/* driver tx path was busy */
	NETDEV_TX_LOCKED = 0x20,	/* driver tx lock was already taken */
};
typedef enum netdev_tx netdev_tx_t;

/*
 * Current order: NETDEV_TX_MASK > NET_XMIT_MASK >= 0 is significant;
 * hard_start_xmit() return < NET_XMIT_MASK means skb was consumed.
 */
static inline bool dev_xmit_complete(int rc)
{
	/*
	 * Positive cases with an skb consumed by a driver:
	 * - successful transmission (rc == NETDEV_TX_OK)
	 * - error while transmitting (rc < 0)
	 * - error while queueing to a different device (rc & NET_XMIT_MASK)
	 */
	if (likely(rc < NET_XMIT_MASK))
		return true;

	return false;
}
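/*
 * Illustration only: a sketch of how a hypothetical transmit path might
 * use dev_xmit_complete() to decide who still owns the skb:
 *
 *	netdev_tx_t rc = ops->ndo_start_xmit(skb, dev);
 *
 *	if (dev_xmit_complete(rc))
 *		return;			// skb was consumed by the driver
 *	// otherwise (e.g. NETDEV_TX_BUSY) the skb is still ours and
 *	// may be requeued and retried later
 */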
/*
 *	Compute the worst case header length according to the protocols
 *	used.
 */
#if defined(CONFIG_WLAN) || IS_ENABLED(CONFIG_AX25)
# if defined(CONFIG_MAC80211_MESH)
#  define LL_MAX_HEADER 128
# else
#  define LL_MAX_HEADER 96
# endif
#else
# define LL_MAX_HEADER 32
#endif

#if !IS_ENABLED(CONFIG_NET_IPIP) && !IS_ENABLED(CONFIG_NET_IPGRE) && \
    !IS_ENABLED(CONFIG_IPV6_SIT) && !IS_ENABLED(CONFIG_IPV6_TUNNEL)
#define MAX_HEADER LL_MAX_HEADER
#else
#define MAX_HEADER (LL_MAX_HEADER + 48)
#endif
/*
 *	Old network device statistics. Fields are native words
 *	(unsigned long) so they can be read and written atomically.
 */

struct net_device_stats {
	unsigned long rx_packets;
	unsigned long tx_packets;
	unsigned long rx_bytes;
	unsigned long tx_bytes;
	unsigned long rx_errors;
	unsigned long tx_errors;
	unsigned long rx_dropped;
	unsigned long tx_dropped;
	unsigned long multicast;
	unsigned long collisions;
	unsigned long rx_length_errors;
	unsigned long rx_over_errors;
	unsigned long rx_crc_errors;
	unsigned long rx_frame_errors;
	unsigned long rx_fifo_errors;
	unsigned long rx_missed_errors;
	unsigned long tx_aborted_errors;
	unsigned long tx_carrier_errors;
	unsigned long tx_fifo_errors;
	unsigned long tx_heartbeat_errors;
	unsigned long tx_window_errors;
	unsigned long rx_compressed;
	unsigned long tx_compressed;
};
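/*
 * Illustration only: because every field is a native word, a hypothetical
 * driver can bump these counters from its receive path one field at a
 * time, without extra locking:
 *
 *	dev->stats.rx_packets++;
 *	dev->stats.rx_bytes += skb->len;
 */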
#include <linux/cache.h>
#include <linux/skbuff.h>

#ifdef CONFIG_RPS
#include <linux/static_key.h>
extern struct static_key rps_needed;
#endif

struct netdev_hw_addr {
	struct list_head list;
	unsigned char addr[MAX_ADDR_LEN];
#define NETDEV_HW_ADDR_T_LAN		1
#define NETDEV_HW_ADDR_T_SAN		2
#define NETDEV_HW_ADDR_T_SLAVE		3
#define NETDEV_HW_ADDR_T_UNICAST	4
#define NETDEV_HW_ADDR_T_MULTICAST	5
	struct rcu_head rcu_head;
};

struct netdev_hw_addr_list {
	struct list_head list;
	int count;
};

#define netdev_hw_addr_list_count(l) ((l)->count)
#define netdev_hw_addr_list_empty(l) (netdev_hw_addr_list_count(l) == 0)
#define netdev_hw_addr_list_for_each(ha, l) \
	list_for_each_entry(ha, &(l)->list, list)

#define netdev_uc_count(dev) netdev_hw_addr_list_count(&(dev)->uc)
#define netdev_uc_empty(dev) netdev_hw_addr_list_empty(&(dev)->uc)
#define netdev_for_each_uc_addr(ha, dev) \
	netdev_hw_addr_list_for_each(ha, &(dev)->uc)

#define netdev_mc_count(dev) netdev_hw_addr_list_count(&(dev)->mc)
#define netdev_mc_empty(dev) netdev_hw_addr_list_empty(&(dev)->mc)
#define netdev_for_each_mc_addr(ha, dev) \
	netdev_hw_addr_list_for_each(ha, &(dev)->mc)
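/*
 * Illustration only: a hypothetical driver walking the multicast list in
 * its ndo_set_rx_mode() hook to program a hardware filter
 * (example_hw_add_mc_filter() is made up):
 *
 *	struct netdev_hw_addr *ha;
 *
 *	netdev_for_each_mc_addr(ha, dev)
 *		example_hw_add_mc_filter(priv, ha->addr);
 */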
	/* cached hardware header; allow for machine alignment needs. */
#define HH_DATA_MOD	16
#define HH_DATA_OFF(__len) \
	(HH_DATA_MOD - (((__len - 1) & (HH_DATA_MOD - 1)) + 1))
#define HH_DATA_ALIGN(__len) \
	(((__len)+(HH_DATA_MOD-1))&~(HH_DATA_MOD - 1))
	unsigned long hh_data[HH_DATA_ALIGN(LL_MAX_HEADER) / sizeof(long)];

/* Reserve HH_DATA_MOD byte-aligned hard_header_len, but at least that much.
 * Alternative is:
 *   dev->hard_header_len ? (dev->hard_header_len +
 *                           (HH_DATA_MOD - 1)) & ~(HH_DATA_MOD - 1) : 0
 *
 * We could use other alignment values, but we must maintain the
 * relationship HH alignment <= LL alignment.
 */
#define LL_RESERVED_SPACE(dev) \
	((((dev)->hard_header_len+(dev)->needed_headroom)&~(HH_DATA_MOD - 1)) + HH_DATA_MOD)
#define LL_RESERVED_SPACE_EXTRA(dev,extra) \
	((((dev)->hard_header_len+(dev)->needed_headroom+(extra))&~(HH_DATA_MOD - 1)) + HH_DATA_MOD)
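/*
 * Illustration only: a hypothetical sender reserving link-layer headroom
 * before building a packet, so the hard header fits without reallocation:
 *
 *	skb = alloc_skb(LL_RESERVED_SPACE(dev) + payload_len, GFP_ATOMIC);
 *	if (!skb)
 *		return -ENOBUFS;
 *	skb_reserve(skb, LL_RESERVED_SPACE(dev));
 *	// push payload and protocol headers; the link-layer header can
 *	// later be prepended into the reserved headroom
 */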
struct header_ops {
	int (*create)(struct sk_buff *skb, struct net_device *dev,
		      unsigned short type, const void *daddr,
		      const void *saddr, unsigned int len);
	int (*parse)(const struct sk_buff *skb, unsigned char *haddr);
	int (*rebuild)(struct sk_buff *skb);
	int (*cache)(const struct neighbour *neigh, struct hh_cache *hh, __be16 type);
	void (*cache_update)(struct hh_cache *hh,
			     const struct net_device *dev,
			     const unsigned char *haddr);
};

/* These flag bits are private to the generic network queueing
 * layer, they may not be explicitly referenced by any other
 * code.
 */
enum netdev_state_t {
	__LINK_STATE_PRESENT,
	__LINK_STATE_NOCARRIER,
	__LINK_STATE_LINKWATCH_PENDING,
	__LINK_STATE_DORMANT,
};

/*
 * This structure holds at boot time configured netdevice settings. They
 * are then used in the device probing.
 */
struct netdev_boot_setup {
	char name[IFNAMSIZ];
	struct ifmap map;
};
#define NETDEV_BOOT_SETUP_MAX 8

int __init netdev_boot_setup(char *str);
/*
 * Structure for NAPI scheduling similar to tasklet but with weighting
 */
struct napi_struct {
	/* The poll_list must only be managed by the entity which
	 * changes the state of the NAPI_STATE_SCHED bit. This means
	 * whoever atomically sets that bit can add this napi_struct
	 * to the per-cpu poll_list, and whoever clears that bit
	 * can remove from the list right before clearing the bit.
	 */
	struct list_head poll_list;

	unsigned int gro_count;
	int (*poll)(struct napi_struct *, int);
#ifdef CONFIG_NETPOLL
	spinlock_t poll_lock;
#endif
	struct net_device *dev;
	struct sk_buff *gro_list;
	struct hrtimer timer;
	struct list_head dev_list;
	struct hlist_node napi_hash_node;
	unsigned int napi_id;
};

enum {
	NAPI_STATE_SCHED,	/* Poll is scheduled */
	NAPI_STATE_DISABLE,	/* Disable pending */
	NAPI_STATE_NPSVC,	/* Netpoll - don't dequeue from poll_list */
	NAPI_STATE_HASHED,	/* In NAPI hash */
};

typedef enum gro_result gro_result_t;
/**
 * enum rx_handler_result - Possible return values for rx_handlers.
 * @RX_HANDLER_CONSUMED: skb was consumed by rx_handler, do not process it
 * further.
 * @RX_HANDLER_ANOTHER: Do another round in receive path. This is indicated in
 * case skb->dev was changed by rx_handler.
 * @RX_HANDLER_EXACT: Force exact delivery, no wildcard.
 * @RX_HANDLER_PASS: Do nothing, pass the skb as if no rx_handler was called.
 *
 * rx_handlers are functions called from inside __netif_receive_skb(), to do
 * special processing of the skb, prior to delivery to protocol handlers.
 *
 * Currently, a net_device can only have a single rx_handler registered. Trying
 * to register a second rx_handler will return -EBUSY.
 *
 * To register a rx_handler on a net_device, use netdev_rx_handler_register().
 * To unregister a rx_handler on a net_device, use
 * netdev_rx_handler_unregister().
 *
 * Upon return, rx_handler is expected to tell __netif_receive_skb() what to
 * do with the skb.
 *
 * If the rx_handler consumed the skb in some way, it should return
 * RX_HANDLER_CONSUMED. This is appropriate when the rx_handler arranged for
 * the skb to be delivered in some other way.
 *
 * If the rx_handler changed skb->dev, to divert the skb to another
 * net_device, it should return RX_HANDLER_ANOTHER. The rx_handler for the
 * new device will be called if it exists.
 *
 * If the rx_handler considers the skb should be ignored, it should return
 * RX_HANDLER_EXACT. The skb will only be delivered to protocol handlers that
 * are registered on the exact device (ptype->dev == skb->dev).
 *
 * If the rx_handler didn't change skb->dev but wants the skb to be delivered
 * normally, it should return RX_HANDLER_PASS.
 *
 * A device without a registered rx_handler will behave as if rx_handler
 * returned RX_HANDLER_PASS.
 */

enum rx_handler_result {
	RX_HANDLER_CONSUMED,
	RX_HANDLER_ANOTHER,
	RX_HANDLER_EXACT,
	RX_HANDLER_PASS,
};
typedef enum rx_handler_result rx_handler_result_t;
typedef rx_handler_result_t rx_handler_func_t(struct sk_buff **pskb);
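/*
 * Illustration only: a minimal hypothetical rx_handler that lets every
 * frame continue down the normal receive path:
 *
 *	static rx_handler_result_t example_handle_frame(struct sk_buff **pskb)
 *	{
 *		struct sk_buff *skb = *pskb;
 *
 *		// inspect or mangle skb here; RX_HANDLER_PASS behaves as
 *		// if no handler were registered at all
 *		return RX_HANDLER_PASS;
 *	}
 *
 * registered (with rtnl held) via:
 *	err = netdev_rx_handler_register(dev, example_handle_frame, NULL);
 */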
void __napi_schedule(struct napi_struct *n);
void __napi_schedule_irqoff(struct napi_struct *n);

static inline bool napi_disable_pending(struct napi_struct *n)
{
	return test_bit(NAPI_STATE_DISABLE, &n->state);
}

/**
 *	napi_schedule_prep - check if napi can be scheduled
 *	@n: napi context
 *
 * Test if NAPI routine is already running, and if not mark
 * it as running. This is used as a condition variable to
 * ensure that only one NAPI poll instance runs. We also make
 * sure there is no pending NAPI disable.
 */
static inline bool napi_schedule_prep(struct napi_struct *n)
{
	return !napi_disable_pending(n) &&
		!test_and_set_bit(NAPI_STATE_SCHED, &n->state);
}

/**
 *	napi_schedule - schedule NAPI poll
 *	@n: napi context
 *
 * Schedule NAPI poll routine to be called if it is not already
 * running.
 */
static inline void napi_schedule(struct napi_struct *n)
{
	if (napi_schedule_prep(n))
		__napi_schedule(n);
}

/**
 *	napi_schedule_irqoff - schedule NAPI poll
 *	@n: napi context
 *
 * Variant of napi_schedule(), assuming hard irqs are masked.
 */
static inline void napi_schedule_irqoff(struct napi_struct *n)
{
	if (napi_schedule_prep(n))
		__napi_schedule_irqoff(n);
}
/* Try to reschedule poll. Called by dev->poll() after napi_complete(). */
static inline bool napi_reschedule(struct napi_struct *napi)
{
	if (napi_schedule_prep(napi)) {
		__napi_schedule(napi);
		return true;
	}
	return false;
}

void __napi_complete(struct napi_struct *n);
void napi_complete_done(struct napi_struct *n, int work_done);
/**
 *	napi_complete - NAPI processing complete
 *	@n: napi context
 *
 * Mark NAPI processing as complete.
 * Consider using napi_complete_done() instead.
 */
static inline void napi_complete(struct napi_struct *n)
{
	return napi_complete_done(n, 0);
}
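/*
 * Illustration only: the canonical shape of a hypothetical driver poll
 * callback, which completes NAPI and re-enables interrupts only when it
 * did less work than its budget (the example_* helpers are made up):
 *
 *	static int example_poll(struct napi_struct *napi, int budget)
 *	{
 *		int work_done = example_clean_rx(napi, budget);
 *
 *		if (work_done < budget) {
 *			napi_complete_done(napi, work_done);
 *			example_enable_irqs(napi);
 *		}
 *		return work_done;
 *	}
 */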
/**
 *	napi_by_id - lookup a NAPI by napi_id
 *	@napi_id: hashed napi_id
 *
 * lookup @napi_id in napi_hash table
 * must be called under rcu_read_lock()
 */
struct napi_struct *napi_by_id(unsigned int napi_id);

/**
 *	napi_hash_add - add a NAPI to global hashtable
 *	@napi: napi context
 *
 * generate a new napi_id and store a @napi under it in napi_hash
 */
void napi_hash_add(struct napi_struct *napi);

/**
 *	napi_hash_del - remove a NAPI from global table
 *	@napi: napi context
 *
 * Warning: caller must observe rcu grace period
 * before freeing memory containing @napi
 */
void napi_hash_del(struct napi_struct *napi);

/**
 *	napi_disable - prevent NAPI from scheduling
 *	@n: napi context
 *
 * Stop NAPI from being scheduled on this context.
 * Waits till any outstanding processing completes.
 */
void napi_disable(struct napi_struct *n);

/**
 *	napi_enable - enable NAPI scheduling
 *	@n: napi context
 *
 * Resume NAPI from being scheduled on this context.
 * Must be paired with napi_disable.
 */
static inline void napi_enable(struct napi_struct *n)
{
	BUG_ON(!test_bit(NAPI_STATE_SCHED, &n->state));
	smp_mb__before_atomic();
	clear_bit(NAPI_STATE_SCHED, &n->state);
}

#ifdef CONFIG_SMP
/**
 *	napi_synchronize - wait until NAPI is not running
 *	@n: napi context
 *
 * Wait until NAPI is done being scheduled on this context.
 * Waits till any outstanding processing completes but
 * does not disable future activations.
 */
static inline void napi_synchronize(const struct napi_struct *n)
{
	while (test_bit(NAPI_STATE_SCHED, &n->state))
		msleep(1);
}
#else
# define napi_synchronize(n)	barrier()
#endif
enum netdev_queue_state_t {
	__QUEUE_STATE_DRV_XOFF,
	__QUEUE_STATE_STACK_XOFF,
	__QUEUE_STATE_FROZEN,
};

#define QUEUE_STATE_DRV_XOFF	(1 << __QUEUE_STATE_DRV_XOFF)
#define QUEUE_STATE_STACK_XOFF	(1 << __QUEUE_STATE_STACK_XOFF)
#define QUEUE_STATE_FROZEN	(1 << __QUEUE_STATE_FROZEN)

#define QUEUE_STATE_ANY_XOFF	(QUEUE_STATE_DRV_XOFF | QUEUE_STATE_STACK_XOFF)
#define QUEUE_STATE_ANY_XOFF_OR_FROZEN (QUEUE_STATE_ANY_XOFF | \
					QUEUE_STATE_FROZEN)
#define QUEUE_STATE_DRV_XOFF_OR_FROZEN (QUEUE_STATE_DRV_XOFF | \
					QUEUE_STATE_FROZEN)

/*
 * __QUEUE_STATE_DRV_XOFF is used by drivers to stop the transmit queue. The
 * netif_tx_* functions below are used to manipulate this flag. The
 * __QUEUE_STATE_STACK_XOFF flag is used by the stack to stop the transmit
 * queue independently. The netif_xmit_*stopped functions below are called
 * to check if the queue has been stopped by the driver or stack (either
 * of the XOFF bits are set in the state). Drivers should not need to call
 * netif_xmit*stopped functions, they should only be using netif_tx_*.
 */
struct netdev_queue {
	struct net_device *dev;
	struct Qdisc __rcu *qdisc;
	struct Qdisc *qdisc_sleeping;
#if defined(CONFIG_XPS) && defined(CONFIG_NUMA)
	int numa_node;
#endif
	spinlock_t _xmit_lock ____cacheline_aligned_in_smp;
	/*
	 * please use this field instead of dev->trans_start
	 */
	unsigned long trans_start;

	/*
	 * Number of TX timeouts for this queue
	 * (/sys/class/net/DEV/Q/trans_timeout)
	 */
	unsigned long trans_timeout;
} ____cacheline_aligned_in_smp;

static inline int netdev_queue_numa_node_read(const struct netdev_queue *q)
{
#if defined(CONFIG_XPS) && defined(CONFIG_NUMA)
	return q->numa_node;
#else
	return NUMA_NO_NODE;
#endif
}

static inline void netdev_queue_numa_node_write(struct netdev_queue *q, int node)
{
#if defined(CONFIG_XPS) && defined(CONFIG_NUMA)
	q->numa_node = node;
#endif
}
#ifdef CONFIG_RPS
/*
 * This structure holds an RPS map which can be of variable length. The
 * map is an array of CPUs.
 */
#define RPS_MAP_SIZE(_num) (sizeof(struct rps_map) + ((_num) * sizeof(u16)))

/*
 * The rps_dev_flow structure contains the mapping of a flow to a CPU, the
 * tail pointer for that CPU's input queue at the time of last enqueue, and
 * a hardware filter index.
 */
struct rps_dev_flow {
	unsigned int last_qtail;
};
#define RPS_NO_FILTER 0xffff

/*
 * The rps_dev_flow_table structure contains a table of flow mappings.
 */
struct rps_dev_flow_table {
	struct rps_dev_flow flows[0];
};
#define RPS_DEV_FLOW_TABLE_SIZE(_num) (sizeof(struct rps_dev_flow_table) + \
	((_num) * sizeof(struct rps_dev_flow)))

/*
 * The rps_sock_flow_table contains mappings of flows to the last CPU
 * on which they were processed by the application (set in recvmsg).
 */
struct rps_sock_flow_table {
	unsigned int mask;
	u16 ents[0];
};
#define RPS_SOCK_FLOW_TABLE_SIZE(_num) (sizeof(struct rps_sock_flow_table) + \
	((_num) * sizeof(u16)))

#define RPS_NO_CPU 0xffff

static inline void rps_record_sock_flow(struct rps_sock_flow_table *table,
					u32 hash)
{
	if (table && hash) {
		unsigned int cpu, index = hash & table->mask;

		/* We only give a hint, preemption can change cpu under us */
		cpu = raw_smp_processor_id();

		if (table->ents[index] != cpu)
			table->ents[index] = cpu;
	}
}

static inline void rps_reset_sock_flow(struct rps_sock_flow_table *table,
				       u32 hash)
{
	if (table)
		table->ents[hash & table->mask] = RPS_NO_CPU;
}

extern struct rps_sock_flow_table __rcu *rps_sock_flow_table;

#ifdef CONFIG_RFS_ACCEL
bool rps_may_expire_flow(struct net_device *dev, u16 rxq_index, u32 flow_id,
			 u16 filter_id);
#endif
#endif /* CONFIG_RPS */
/* This structure contains an instance of an RX queue. */
struct netdev_rx_queue {
#ifdef CONFIG_RPS
	struct rps_map __rcu *rps_map;
	struct rps_dev_flow_table __rcu *rps_flow_table;
#endif
	struct net_device *dev;
} ____cacheline_aligned_in_smp;

/*
 * RX queue sysfs structures and functions.
 */
struct rx_queue_attribute {
	struct attribute attr;
	ssize_t (*show)(struct netdev_rx_queue *queue,
			struct rx_queue_attribute *attr, char *buf);
	ssize_t (*store)(struct netdev_rx_queue *queue,
			 struct rx_queue_attribute *attr, const char *buf, size_t len);
};

#ifdef CONFIG_XPS
/*
 * This structure holds an XPS map which can be of variable length. The
 * map is an array of queues.
 */
struct xps_map {
	unsigned int len;
	unsigned int alloc_len;
	struct rcu_head rcu;
	u16 queues[0];
};
#define XPS_MAP_SIZE(_num) (sizeof(struct xps_map) + ((_num) * sizeof(u16)))
#define XPS_MIN_MAP_ALLOC ((L1_CACHE_BYTES - sizeof(struct xps_map)) \
	/ sizeof(u16))

/*
 * This structure holds all XPS maps for device. Maps are indexed by CPU.
 */
struct xps_dev_maps {
	struct xps_map __rcu *cpu_map[0];
};
#define XPS_DEV_MAPS_SIZE (sizeof(struct xps_dev_maps) + \
	(nr_cpu_ids * sizeof(struct xps_map *)))
#endif /* CONFIG_XPS */
#define TC_MAX_QUEUE	16
#define TC_BITMASK	15
/* HW offloaded queuing disciplines txq count and offset maps */
struct netdev_tc_txq {
	u16 count;
	u16 offset;
};

#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
/*
 * This structure is to hold information about the device
 * configured to run FCoE protocol stack.
 */
struct netdev_fcoe_hbainfo {
	char manufacturer[64];
	char serial_number[64];
	char hardware_version[64];
	char driver_version[64];
	char optionrom_version[64];
	char firmware_version[64];
	char model_description[256];
};
#endif

#define MAX_PHYS_ITEM_ID_LEN 32

/* This structure holds a unique identifier to identify some
 * physical item (port for example) used by a netdevice.
 */
struct netdev_phys_item_id {
	unsigned char id[MAX_PHYS_ITEM_ID_LEN];
	unsigned char id_len;
};
typedef u16 (*select_queue_fallback_t)(struct net_device *dev,
				       struct sk_buff *skb);
/*
 * This structure defines the management hooks for network devices.
 * The following hooks can be defined; unless noted otherwise, they are
 * optional and can be filled with a null pointer.
 *
 * int (*ndo_init)(struct net_device *dev);
 *	This function is called once when network device is registered.
 *	The network device can use this for any late stage initialization
 *	or semantic validation. It can fail with an error code which will
 *	be propagated back to register_netdev.
 *
 * void (*ndo_uninit)(struct net_device *dev);
 *	This function is called when device is unregistered or when registration
 *	fails. It is not called if init fails.
 *
 * int (*ndo_open)(struct net_device *dev);
 *	This function is called when network device transitions to the up
 *	state.
 *
 * int (*ndo_stop)(struct net_device *dev);
 *	This function is called when network device transitions to the down
 *	state.
 *
 * netdev_tx_t (*ndo_start_xmit)(struct sk_buff *skb,
 *                               struct net_device *dev);
 *	Called when a packet needs to be transmitted.
 *	Must return NETDEV_TX_OK or NETDEV_TX_BUSY.
 *	(can also return NETDEV_TX_LOCKED iff NETIF_F_LLTX)
 *	Required; can not be NULL.
 *
 * u16 (*ndo_select_queue)(struct net_device *dev, struct sk_buff *skb,
 *                         void *accel_priv, select_queue_fallback_t fallback);
 *	Called to decide which queue to use when device supports multiple
 *	transmit queues.
 *
 * void (*ndo_change_rx_flags)(struct net_device *dev, int flags);
 *	This function is called to allow device receiver to make
 *	changes to configuration when multicast or promiscuous mode is enabled.
 *
 * void (*ndo_set_rx_mode)(struct net_device *dev);
 *	This function is called when the device changes its address list
 *	filtering. If the driver handles unicast address filtering, it should
 *	set IFF_UNICAST_FLT in its priv_flags.
 *
 * int (*ndo_set_mac_address)(struct net_device *dev, void *addr);
 *	This function is called when the Media Access Control address
 *	needs to be changed. If this interface is not defined, the
 *	MAC address can not be changed.
 *
 * int (*ndo_validate_addr)(struct net_device *dev);
 *	Test if Media Access Control address is valid for the device.
 *
 * int (*ndo_do_ioctl)(struct net_device *dev, struct ifreq *ifr, int cmd);
 *	Called when a user requests an ioctl which can't be handled by
 *	the generic interface code. If not defined, ioctls return a
 *	not-supported error code.
 *
 * int (*ndo_set_config)(struct net_device *dev, struct ifmap *map);
 *	Used to set network devices' bus interface parameters. This interface
 *	is retained for legacy reasons; new devices should use the bus
 *	interface (PCI) for low level management.
 *
 * int (*ndo_change_mtu)(struct net_device *dev, int new_mtu);
 *	Called when a user wants to change the Maximum Transfer Unit
 *	of a device. If not defined, any request to change MTU will
 *	return an error.
 *
 * void (*ndo_tx_timeout)(struct net_device *dev);
 *	Callback used when the transmitter has not made any progress
 *	for dev->watchdog ticks.
 *
 * struct rtnl_link_stats64* (*ndo_get_stats64)(struct net_device *dev,
 *                                              struct rtnl_link_stats64 *storage);
 * struct net_device_stats* (*ndo_get_stats)(struct net_device *dev);
 *	Called when a user wants to get the network device usage
 *	statistics. Drivers must do one of the following:
 *	1. Define @ndo_get_stats64 to fill in a zero-initialised
 *	   rtnl_link_stats64 structure passed by the caller.
 *	2. Define @ndo_get_stats to update a net_device_stats structure
 *	   (which should normally be dev->stats) and return a pointer to
 *	   it. The structure may be changed asynchronously only if each
 *	   field is written atomically.
 *	3. Update dev->stats asynchronously and atomically, and define
 *	   neither operation.
 *
 * int (*ndo_vlan_rx_add_vid)(struct net_device *dev, __be16 proto, u16 vid);
 *	If device supports VLAN filtering this function is called when a
 *	VLAN id is registered.
 *
 * int (*ndo_vlan_rx_kill_vid)(struct net_device *dev, __be16 proto, u16 vid);
 *	If device supports VLAN filtering this function is called when a
 *	VLAN id is unregistered.
 *
 * void (*ndo_poll_controller)(struct net_device *dev);
 *
 *	SR-IOV management functions.
 * int (*ndo_set_vf_mac)(struct net_device *dev, int vf, u8* mac);
 * int (*ndo_set_vf_vlan)(struct net_device *dev, int vf, u16 vlan, u8 qos);
 * int (*ndo_set_vf_rate)(struct net_device *dev, int vf, int min_tx_rate,
 *			  int max_tx_rate);
 * int (*ndo_set_vf_spoofchk)(struct net_device *dev, int vf, bool setting);
 * int (*ndo_get_vf_config)(struct net_device *dev,
 *			    int vf, struct ifla_vf_info *ivf);
 * int (*ndo_set_vf_link_state)(struct net_device *dev, int vf, int link_state);
 * int (*ndo_set_vf_port)(struct net_device *dev, int vf,
 *			  struct nlattr *port[]);
 * int (*ndo_get_vf_port)(struct net_device *dev, int vf, struct sk_buff *skb);
 * int (*ndo_setup_tc)(struct net_device *dev, u8 tc)
 *	Called to setup 'tc' number of traffic classes in the net device. This
 *	is always called from the stack with the rtnl lock held and netif tx
 *	queues stopped. This allows the netdevice to perform queue management
 *	safely.
 *
 *	Fiber Channel over Ethernet (FCoE) offload functions.
 * int (*ndo_fcoe_enable)(struct net_device *dev);
 *	Called when the FCoE protocol stack wants to start using LLD for FCoE
 *	so the underlying device can perform whatever needed configuration or
 *	initialization to support acceleration of FCoE traffic.
 *
 * int (*ndo_fcoe_disable)(struct net_device *dev);
 *	Called when the FCoE protocol stack wants to stop using LLD for FCoE
 *	so the underlying device can perform whatever needed clean-ups to
 *	stop supporting acceleration of FCoE traffic.
 *
 * int (*ndo_fcoe_ddp_setup)(struct net_device *dev, u16 xid,
 *			     struct scatterlist *sgl, unsigned int sgc);
 *	Called when the FCoE Initiator wants to initialize an I/O that
 *	is a possible candidate for Direct Data Placement (DDP). The LLD can
 *	perform necessary setup and returns 1 to indicate the device is set up
 *	successfully to perform DDP on this I/O, otherwise this returns 0.
 *
 * int (*ndo_fcoe_ddp_done)(struct net_device *dev, u16 xid);
 *	Called when the FCoE Initiator/Target is done with the DDPed I/O as
 *	indicated by the FC exchange id 'xid', so the underlying device can
 *	clean up and reuse resources for later DDP requests.
 *
 * int (*ndo_fcoe_ddp_target)(struct net_device *dev, u16 xid,
 *			      struct scatterlist *sgl, unsigned int sgc);
 *	Called when the FCoE Target wants to initialize an I/O that
 *	is a possible candidate for Direct Data Placement (DDP). The LLD can
 *	perform necessary setup and returns 1 to indicate the device is set up
 *	successfully to perform DDP on this I/O, otherwise this returns 0.
 *
 * int (*ndo_fcoe_get_hbainfo)(struct net_device *dev,
 *			       struct netdev_fcoe_hbainfo *hbainfo);
 *	Called when the FCoE Protocol stack wants information on the underlying
 *	device. This information is utilized by the FCoE protocol stack to
 *	register attributes with Fiber Channel management service as per the
 *	FC-GS Fabric Device Management Information (FDMI) specification.
 *
 * int (*ndo_fcoe_get_wwn)(struct net_device *dev, u64 *wwn, int type);
 *	Called when the underlying device wants to override default World Wide
 *	Name (WWN) generation mechanism in FCoE protocol stack to pass its own
 *	World Wide Port Name (WWPN) or World Wide Node Name (WWNN) to the FCoE
 *	protocol stack to use.
 *
 *	RFS acceleration.
 * int (*ndo_rx_flow_steer)(struct net_device *dev, const struct sk_buff *skb,
 *			    u16 rxq_index, u32 flow_id);
 *	Set hardware filter for RFS. rxq_index is the target queue index;
 *	flow_id is a flow ID to be passed to rps_may_expire_flow() later.
 *	Return the filter ID on success, or a negative error code.
 *
 *	Slave management functions (for bridge, bonding, etc).
 * int (*ndo_add_slave)(struct net_device *dev, struct net_device *slave_dev);
 *	Called to make another netdev an underling.
 *
 * int (*ndo_del_slave)(struct net_device *dev, struct net_device *slave_dev);
 *	Called to release previously enslaved netdev.
 *
 *	Feature/offload setting functions.
 * netdev_features_t (*ndo_fix_features)(struct net_device *dev,
 *		netdev_features_t features);
 *	Adjusts the requested feature flags according to device-specific
 *	constraints, and returns the resulting flags. Must not modify
 *	the device state.
 *
 * int (*ndo_set_features)(struct net_device *dev, netdev_features_t features);
 *	Called to update device configuration to new features. Passed
 *	feature set might be less than what was returned by ndo_fix_features().
 *	Must return >0 or -errno if it changed dev->features itself.
 *
 * int (*ndo_fdb_add)(struct ndmsg *ndm, struct nlattr *tb[],
 *		      struct net_device *dev,
 *		      const unsigned char *addr, u16 vid, u16 flags)
 *	Adds an FDB entry to dev for addr.
 * int (*ndo_fdb_del)(struct ndmsg *ndm, struct nlattr *tb[],
 *		      struct net_device *dev,
 *		      const unsigned char *addr, u16 vid)
 *	Deletes the FDB entry from dev corresponding to addr.
 * int (*ndo_fdb_dump)(struct sk_buff *skb, struct netlink_callback *cb,
 *		       struct net_device *dev, struct net_device *filter_dev,
 *		       int idx)
 *	Used to add FDB entries to dump requests. Implementers should add
 *	entries to skb and update idx with the number of entries.
 *
 * int (*ndo_bridge_setlink)(struct net_device *dev, struct nlmsghdr *nlh)
 * int (*ndo_bridge_getlink)(struct sk_buff *skb, u32 pid, u32 seq,
 *			     struct net_device *dev, u32 filter_mask)
 *
 * int (*ndo_change_carrier)(struct net_device *dev, bool new_carrier);
 *	Called to change device carrier. Soft-devices (like dummy, team, etc)
 *	which do not represent real hardware may define this to allow their
 *	userspace components to manage their virtual carrier state. Devices
 *	that determine carrier state from physical hardware properties (eg
 *	network cables) or protocol-dependent mechanisms (eg
 *	USB_CDC_NOTIFY_NETWORK_CONNECTION) should NOT implement this function.
 *
 * int (*ndo_get_phys_port_id)(struct net_device *dev,
 *			       struct netdev_phys_item_id *ppid);
 *	Called to get ID of physical port of this device. If driver does
 *	not implement this, it is assumed that the hw is not able to have
 *	multiple net devices on single physical port.
 *
 * void (*ndo_add_vxlan_port)(struct net_device *dev,
 *			      sa_family_t sa_family, __be16 port);
 *	Called by vxlan to notify a driver about the UDP port and socket
 *	address family that vxlan is listening to. It is called only when
 *	a new port starts listening. The operation is protected by the
 *	vxlan_net->sock_lock.
 *
 * void (*ndo_del_vxlan_port)(struct net_device *dev,
 *			      sa_family_t sa_family, __be16 port);
 *	Called by vxlan to notify the driver about a UDP port and socket
 *	address family that vxlan is not listening to anymore. The operation
 *	is protected by the vxlan_net->sock_lock.
 *
 * void* (*ndo_dfwd_add_station)(struct net_device *pdev,
 *				 struct net_device *dev)
 *	Called by upper layer devices to accelerate switching or other
 *	station functionality into hardware. 'pdev' is the lowerdev
 *	to use for the offload and 'dev' is the net device that will
 *	back the offload. Returns a pointer to the private structure
 *	the upper layer will maintain.
 * void (*ndo_dfwd_del_station)(struct net_device *pdev, void *priv)
 *	Called by upper layer device to delete the station created
 *	by 'ndo_dfwd_add_station'. 'pdev' is the net device backing
 *	the station and 'priv' is the structure returned by the add
 *	operation.
 * netdev_tx_t (*ndo_dfwd_start_xmit)(struct sk_buff *skb,
 *				      struct net_device *dev,
 *				      void *priv);
 *	Callback to use for xmit over the accelerated station. This
 *	is used in place of ndo_start_xmit on accelerated net
 *	devices.
 * bool (*ndo_gso_check)(struct sk_buff *skb,
 *			 struct net_device *dev);
 *	Called by core transmit path to determine if device is capable of
 *	performing GSO on a packet. The device returns true if it is
 *	able to GSO the packet, false otherwise. If the return value is
 *	false the stack will do software GSO.
 *
 * int (*ndo_switch_parent_id_get)(struct net_device *dev,
 *				   struct netdev_phys_item_id *psid);
 *	Called to get an ID of the switch chip this port is part of.
 *	If driver implements this, it indicates that it represents a port
 *	of a switch chip.
 * int (*ndo_switch_port_stp_update)(struct net_device *dev, u8 state);
 *	Called to notify switch device port of bridge port STP
 *	state change.
 */
struct net_device_ops {
	int			(*ndo_init)(struct net_device *dev);
	void			(*ndo_uninit)(struct net_device *dev);
	int			(*ndo_open)(struct net_device *dev);
	int			(*ndo_stop)(struct net_device *dev);
	netdev_tx_t		(*ndo_start_xmit)(struct sk_buff *skb,
						  struct net_device *dev);
	u16			(*ndo_select_queue)(struct net_device *dev,
						    struct sk_buff *skb,
						    void *accel_priv,
						    select_queue_fallback_t fallback);
	void			(*ndo_change_rx_flags)(struct net_device *dev,
						       int flags);
	void			(*ndo_set_rx_mode)(struct net_device *dev);
	int			(*ndo_set_mac_address)(struct net_device *dev,
						       void *addr);
	int			(*ndo_validate_addr)(struct net_device *dev);
	int			(*ndo_do_ioctl)(struct net_device *dev,
						struct ifreq *ifr, int cmd);
	int			(*ndo_set_config)(struct net_device *dev,
						  struct ifmap *map);
	int			(*ndo_change_mtu)(struct net_device *dev,
						  int new_mtu);
	int			(*ndo_neigh_setup)(struct net_device *dev,
						   struct neigh_parms *);
	void			(*ndo_tx_timeout)(struct net_device *dev);

	struct rtnl_link_stats64* (*ndo_get_stats64)(struct net_device *dev,
						     struct rtnl_link_stats64 *storage);
	struct net_device_stats* (*ndo_get_stats)(struct net_device *dev);

	int			(*ndo_vlan_rx_add_vid)(struct net_device *dev,
						       __be16 proto, u16 vid);
	int			(*ndo_vlan_rx_kill_vid)(struct net_device *dev,
							__be16 proto, u16 vid);
#ifdef CONFIG_NET_POLL_CONTROLLER
	void			(*ndo_poll_controller)(struct net_device *dev);
	int			(*ndo_netpoll_setup)(struct net_device *dev,
						     struct netpoll_info *info);
	void			(*ndo_netpoll_cleanup)(struct net_device *dev);
#endif
#ifdef CONFIG_NET_RX_BUSY_POLL
	int			(*ndo_busy_poll)(struct napi_struct *dev);
#endif
	int			(*ndo_set_vf_mac)(struct net_device *dev,
						  int queue, u8 *mac);
	int			(*ndo_set_vf_vlan)(struct net_device *dev,
						   int queue, u16 vlan, u8 qos);
	int			(*ndo_set_vf_rate)(struct net_device *dev,
						   int vf, int min_tx_rate,
						   int max_tx_rate);
	int			(*ndo_set_vf_spoofchk)(struct net_device *dev,
						       int vf, bool setting);
	int			(*ndo_get_vf_config)(struct net_device *dev,
						     int vf,
						     struct ifla_vf_info *ivf);
	int			(*ndo_set_vf_link_state)(struct net_device *dev,
							 int vf, int link_state);
	int			(*ndo_set_vf_port)(struct net_device *dev,
						   int vf,
						   struct nlattr *port[]);
	int			(*ndo_get_vf_port)(struct net_device *dev,
						   int vf, struct sk_buff *skb);
	int			(*ndo_setup_tc)(struct net_device *dev, u8 tc);
#if IS_ENABLED(CONFIG_FCOE)
	int			(*ndo_fcoe_enable)(struct net_device *dev);
	int			(*ndo_fcoe_disable)(struct net_device *dev);
	int			(*ndo_fcoe_ddp_setup)(struct net_device *dev,
						      u16 xid,
						      struct scatterlist *sgl,
						      unsigned int sgc);
	int			(*ndo_fcoe_ddp_done)(struct net_device *dev,
						     u16 xid);
	int			(*ndo_fcoe_ddp_target)(struct net_device *dev,
						       u16 xid,
						       struct scatterlist *sgl,
						       unsigned int sgc);
	int			(*ndo_fcoe_get_hbainfo)(struct net_device *dev,
							struct netdev_fcoe_hbainfo *hbainfo);
#endif

#if IS_ENABLED(CONFIG_LIBFCOE)
#define NETDEV_FCOE_WWNN 0
#define NETDEV_FCOE_WWPN 1
	int			(*ndo_fcoe_get_wwn)(struct net_device *dev,
						    u64 *wwn, int type);
#endif

#ifdef CONFIG_RFS_ACCEL
	int			(*ndo_rx_flow_steer)(struct net_device *dev,
						     const struct sk_buff *skb,
						     u16 rxq_index,
						     u32 flow_id);
#endif
	int			(*ndo_add_slave)(struct net_device *dev,
						 struct net_device *slave_dev);
	int			(*ndo_del_slave)(struct net_device *dev,
						 struct net_device *slave_dev);
	netdev_features_t	(*ndo_fix_features)(struct net_device *dev,
						    netdev_features_t features);
	int			(*ndo_set_features)(struct net_device *dev,
						    netdev_features_t features);
	int			(*ndo_neigh_construct)(struct neighbour *n);
	void			(*ndo_neigh_destroy)(struct neighbour *n);

	int			(*ndo_fdb_add)(struct ndmsg *ndm,
					       struct nlattr *tb[],
					       struct net_device *dev,
					       const unsigned char *addr,
					       u16 vid,
					       u16 flags);
	int			(*ndo_fdb_del)(struct ndmsg *ndm,
					       struct nlattr *tb[],
					       struct net_device *dev,
					       const unsigned char *addr,
					       u16 vid);
	int			(*ndo_fdb_dump)(struct sk_buff *skb,
						struct netlink_callback *cb,
						struct net_device *dev,
						struct net_device *filter_dev,
						int idx);

	int			(*ndo_bridge_setlink)(struct net_device *dev,
						      struct nlmsghdr *nlh);
	int			(*ndo_bridge_getlink)(struct sk_buff *skb,
						      u32 pid, u32 seq,
						      struct net_device *dev,
						      u32 filter_mask);
	int			(*ndo_bridge_dellink)(struct net_device *dev,
						      struct nlmsghdr *nlh);
	int			(*ndo_change_carrier)(struct net_device *dev,
						      bool new_carrier);
	int			(*ndo_get_phys_port_id)(struct net_device *dev,
							struct netdev_phys_item_id *ppid);
	void			(*ndo_add_vxlan_port)(struct net_device *dev,
						      sa_family_t sa_family,
						      __be16 port);
	void			(*ndo_del_vxlan_port)(struct net_device *dev,
						      sa_family_t sa_family,
						      __be16 port);

	void*			(*ndo_dfwd_add_station)(struct net_device *pdev,
							struct net_device *dev);
	void			(*ndo_dfwd_del_station)(struct net_device *pdev,
							void *priv);

	netdev_tx_t		(*ndo_dfwd_start_xmit)(struct sk_buff *skb,
						       struct net_device *dev,
						       void *priv);
	int			(*ndo_get_lock_subclass)(struct net_device *dev);
	bool			(*ndo_gso_check)(struct sk_buff *skb,
						 struct net_device *dev);
#ifdef CONFIG_NET_SWITCHDEV
	int			(*ndo_switch_parent_id_get)(struct net_device *dev,
							    struct netdev_phys_item_id *psid);
	int			(*ndo_switch_port_stp_update)(struct net_device *dev,
							      u8 state);
#endif
};
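/*
 * Illustration only: the minimal set of hooks nearly every driver
 * provides, wired into a hypothetical driver's ops table:
 *
 *	static const struct net_device_ops example_netdev_ops = {
 *		.ndo_open	= example_open,		// made up
 *		.ndo_stop	= example_stop,		// made up
 *		.ndo_start_xmit	= example_start_xmit,	// made up
 *	};
 *
 * and assigned before registration:
 *	dev->netdev_ops = &example_netdev_ops;
 */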
/**
 * enum net_device_priv_flags - &struct net_device priv_flags
 *
 * These are the &struct net_device flags; they are only set internally
 * by drivers and used in the kernel. These flags are invisible to
 * userspace, which means that the order of these flags can change
 * during any kernel release.
 *
 * You should have a pretty good reason to be extending these flags.
 *
 * @IFF_802_1Q_VLAN: 802.1Q VLAN device
 * @IFF_EBRIDGE: Ethernet bridging device
 * @IFF_SLAVE_INACTIVE: bonding slave not the curr. active
 * @IFF_MASTER_8023AD: bonding master, 802.3ad
 * @IFF_MASTER_ALB: bonding master, balance-alb
 * @IFF_BONDING: bonding master or slave
 * @IFF_SLAVE_NEEDARP: need ARPs for validation
 * @IFF_ISATAP: ISATAP interface (RFC4214)
 * @IFF_MASTER_ARPMON: bonding master, ARP mon in use
 * @IFF_WAN_HDLC: WAN HDLC device
 * @IFF_XMIT_DST_RELEASE: dev_hard_start_xmit() is allowed to
 *	release skb->dst
 * @IFF_DONT_BRIDGE: disallow bridging this ether dev
 * @IFF_DISABLE_NETPOLL: disable netpoll at run-time
 * @IFF_MACVLAN_PORT: device used as macvlan port
 * @IFF_BRIDGE_PORT: device used as bridge port
 * @IFF_OVS_DATAPATH: device used as Open vSwitch datapath port
 * @IFF_TX_SKB_SHARING: The interface supports sharing skbs on transmit
 * @IFF_UNICAST_FLT: Supports unicast filtering
 * @IFF_TEAM_PORT: device used as team port
 * @IFF_SUPP_NOFCS: device supports sending custom FCS
 * @IFF_LIVE_ADDR_CHANGE: device supports hardware address
 *	change when it's running
 * @IFF_MACVLAN: Macvlan device
 */
enum netdev_priv_flags {
	IFF_802_1Q_VLAN			= 1<<0,
	IFF_EBRIDGE			= 1<<1,
	IFF_SLAVE_INACTIVE		= 1<<2,
	IFF_MASTER_8023AD		= 1<<3,
	IFF_MASTER_ALB			= 1<<4,
	IFF_BONDING			= 1<<5,
	IFF_SLAVE_NEEDARP		= 1<<6,
	IFF_ISATAP			= 1<<7,
	IFF_MASTER_ARPMON		= 1<<8,
	IFF_WAN_HDLC			= 1<<9,
	IFF_XMIT_DST_RELEASE		= 1<<10,
	IFF_DONT_BRIDGE			= 1<<11,
	IFF_DISABLE_NETPOLL		= 1<<12,
	IFF_MACVLAN_PORT		= 1<<13,
	IFF_BRIDGE_PORT			= 1<<14,
	IFF_OVS_DATAPATH		= 1<<15,
	IFF_TX_SKB_SHARING		= 1<<16,
	IFF_UNICAST_FLT			= 1<<17,
	IFF_TEAM_PORT			= 1<<18,
	IFF_SUPP_NOFCS			= 1<<19,
	IFF_LIVE_ADDR_CHANGE		= 1<<20,
	IFF_MACVLAN			= 1<<21,
	IFF_XMIT_DST_RELEASE_PERM	= 1<<22,
	IFF_IPVLAN_MASTER		= 1<<23,
	IFF_IPVLAN_SLAVE		= 1<<24,
};
#define IFF_802_1Q_VLAN			IFF_802_1Q_VLAN
#define IFF_EBRIDGE			IFF_EBRIDGE
#define IFF_SLAVE_INACTIVE		IFF_SLAVE_INACTIVE
#define IFF_MASTER_8023AD		IFF_MASTER_8023AD
#define IFF_MASTER_ALB			IFF_MASTER_ALB
#define IFF_BONDING			IFF_BONDING
#define IFF_SLAVE_NEEDARP		IFF_SLAVE_NEEDARP
#define IFF_ISATAP			IFF_ISATAP
#define IFF_MASTER_ARPMON		IFF_MASTER_ARPMON
#define IFF_WAN_HDLC			IFF_WAN_HDLC
#define IFF_XMIT_DST_RELEASE		IFF_XMIT_DST_RELEASE
#define IFF_DONT_BRIDGE			IFF_DONT_BRIDGE
#define IFF_DISABLE_NETPOLL		IFF_DISABLE_NETPOLL
#define IFF_MACVLAN_PORT		IFF_MACVLAN_PORT
#define IFF_BRIDGE_PORT			IFF_BRIDGE_PORT
#define IFF_OVS_DATAPATH		IFF_OVS_DATAPATH
#define IFF_TX_SKB_SHARING		IFF_TX_SKB_SHARING
#define IFF_UNICAST_FLT			IFF_UNICAST_FLT
#define IFF_TEAM_PORT			IFF_TEAM_PORT
#define IFF_SUPP_NOFCS			IFF_SUPP_NOFCS
#define IFF_LIVE_ADDR_CHANGE		IFF_LIVE_ADDR_CHANGE
#define IFF_MACVLAN			IFF_MACVLAN
#define IFF_XMIT_DST_RELEASE_PERM	IFF_XMIT_DST_RELEASE_PERM
#define IFF_IPVLAN_MASTER		IFF_IPVLAN_MASTER
#define IFF_IPVLAN_SLAVE		IFF_IPVLAN_SLAVE
/**
 *	struct net_device - The DEVICE structure.
 *		Actually, this whole structure is a big mistake. It mixes I/O
 *		data with strictly "high-level" data, and it has to know about
 *		almost every data structure used in the INET module.
 *
 *	@name:	This is the first field of the "visible" part of this structure
 *		(i.e. as seen by users in the "Space.c" file). It is the name
 *		of the interface.
 *
 *	@name_hlist: Device name hash chain, please keep it close to name[]
 *	@ifalias: SNMP alias
 *	@mem_end: Shared memory end
 *	@mem_start: Shared memory start
 *	@base_addr: Device I/O address
 *	@irq: Device IRQ number
 *
 *	@state: Generic network queuing layer state, see netdev_state_t
 *	@dev_list: The global list of network devices
 *	@napi_list: List entry used for polling NAPI devices
 *	@unreg_list: List entry used when we are unregistering the
 *		device; see the function unregister_netdev
 *	@close_list: List entry used when we are closing the device
 *
 *	@adj_list: Directly linked devices, like slaves for bonding
 *	@all_adj_list: All linked devices, *including* neighbours
 *	@features: Currently active device features
 *	@hw_features: User-changeable features
 *
 *	@wanted_features: User-requested features
 *	@vlan_features: Mask of features inheritable by VLAN devices
 *
 *	@hw_enc_features: Mask of features inherited by encapsulating devices
 *		This field indicates what encapsulation
 *		offloads the hardware is capable of doing,
 *		and drivers will need to set them appropriately.
 *
 *	@mpls_features: Mask of features inheritable by MPLS
 *
 *	@ifindex: interface index
 *	@iflink: unique device identifier
 *
 *	@stats: Statistics struct, which was left as a legacy, use
 *		rtnl_link_stats64 instead
 *
 *	@rx_dropped: Dropped packets by core network,
 *		do not use this in drivers
 *	@tx_dropped: Dropped packets by core network,
 *		do not use this in drivers
 *
 *	@carrier_changes: Stats to monitor carrier on<->off transitions
 *
 *	@wireless_handlers: List of functions to handle Wireless Extensions,
 *		instead of ioctl,
 *		see <net/iw_handler.h> for details.
 *	@wireless_data: Instance data managed by the core of wireless extensions
 *
 *	@netdev_ops: Includes several pointers to callbacks,
 *		if one wants to override the ndo_*() functions
 *	@ethtool_ops: Management operations
 *	@fwd_ops: Management operations
 *	@header_ops: Includes callbacks for creating, parsing, rebuilding, etc
 *		of Layer 2 headers.
 *
 *	@flags: Interface flags (a la BSD)
 *	@priv_flags: Like 'flags' but invisible to userspace,
 *		see if.h for the definitions
 *	@gflags: Global flags ( kept as legacy )
 *	@padded: How much padding added by alloc_netdev()
 *	@operstate: RFC2863 operstate
 *	@link_mode: Mapping policy to operstate
 *	@if_port: Selectable AUI, TP, ...
 *
 *	@mtu: Interface MTU value
 *	@type: Interface hardware type
 *	@hard_header_len: Hardware header length
 *
 *	@needed_headroom: Extra headroom the hardware may need, but not in all
 *		cases can this be guaranteed
 *	@needed_tailroom: Extra tailroom the hardware may need, but not in all
 *		cases can this be guaranteed. Some cases also use
 *		LL_MAX_HEADER instead to allocate the skb
 *
 *	interface address info:
 *
 *	@perm_addr: Permanent hw address
 *	@addr_assign_type: Hw address assignment type
 *	@addr_len: Hardware address length
 *	@neigh_priv_len: Used in neigh_alloc(),
 *		initialized only in atm/clip.c
 *	@dev_id: Used to differentiate devices that share
 *		the same link layer address
 *	@dev_port: Used to differentiate devices that share
 *		the same function
 *	@addr_list_lock: XXX: need comments on this one
 *	@uc: unicast mac addresses
 *	@mc: multicast mac addresses
 *	@dev_addrs: list of device hw addresses
 *	@queues_kset: Group of all Kobjects in the Tx and RX queues
 *	@uc_promisc: Counter that indicates promiscuous mode
 *		has been enabled due to the need to listen to
 *		additional unicast addresses in a device that
 *		does not implement ndo_set_rx_mode()
 *	@promiscuity: Number of times the NIC is told to work in
 *		promiscuous mode; if it becomes 0 the NIC will
 *		exit promiscuous mode
 *	@allmulti: Counter, enables or disables allmulticast mode
 *
 *	@vlan_info: VLAN info
 *	@dsa_ptr: dsa specific data
 *	@tipc_ptr: TIPC specific data
 *	@atalk_ptr: AppleTalk link
 *	@ip_ptr: IPv4 specific data
 *	@dn_ptr: DECnet specific data
 *	@ip6_ptr: IPv6 specific data
 *	@ax25_ptr: AX.25 specific data
 *	@ieee80211_ptr: IEEE 802.11 specific data, assign before registering
 *
 *	@last_rx: Time of last Rx
 *	@dev_addr: Hw address (before bcast,
 *		because most packets are unicast)
 *
 *	@_rx: Array of RX queues
 *	@num_rx_queues: Number of RX queues
 *		allocated at register_netdev() time
 *	@real_num_rx_queues: Number of RX queues currently active in device
 *
 *	@rx_handler: handler for received packets
 *	@rx_handler_data: XXX: need comments on this one
 *	@ingress_queue: XXX: need comments on this one
 *	@broadcast: hw bcast address
 *
 *	@_tx: Array of TX queues
 *	@num_tx_queues: Number of TX queues allocated at alloc_netdev_mq() time
 *	@real_num_tx_queues: Number of TX queues currently active in device
 *	@qdisc: Root qdisc from userspace point of view
 *	@tx_queue_len: Max frames per queue allowed
 *	@tx_global_lock: XXX: need comments on this one
 *
 *	@xps_maps: XXX: need comments on this one
 *
 *	@rx_cpu_rmap: CPU reverse-mapping for RX completion interrupts,
 *		indexed by RX queue number. Assigned by driver.
 *		This must only be set if the ndo_rx_flow_steer
 *		operation is defined
 *
 *	@trans_start: Time (in jiffies) of last Tx
 *	@watchdog_timeo: Represents the timeout that is used by
 *		the watchdog ( see dev_watchdog() )
 *	@watchdog_timer: List of timers
 *
 *	@pcpu_refcnt: Number of references to this device
 *	@todo_list: Delayed register/unregister
 *	@index_hlist: Device index hash chain
 *	@link_watch_list: XXX: need comments on this one
 *
 *	@reg_state: Register/unregister state machine
 *	@dismantle: Device is going to be freed
 *	@rtnl_link_state: This enum represents the phases of creating
 *		a new link
 *
 *	@destructor: Called from unregister,
 *		can be used to call free_netdev
 *	@npinfo: XXX: need comments on this one
 *	@nd_net: Network namespace this network device is inside
 *
 *	@ml_priv: Mid-layer private
 *	@lstats: Loopback statistics
 *	@tstats: Tunnel statistics
 *	@dstats: Dummy statistics
 *	@vstats: Virtual ethernet statistics
 *
 *	@dev: Class/net/name entry
 *	@sysfs_groups: Space for optional device, statistics and wireless
 *		sysfs groups
 *
 *	@sysfs_rx_queue_group: Space for optional per-rx queue attributes
 *	@rtnl_link_ops: Rtnl_link_ops
 *
 *	@gso_max_size: Maximum size of generic segmentation offload
 *	@gso_max_segs: Maximum number of segments that can be passed to the
 *		NIC for GSO
 *	@gso_min_segs: Minimum number of segments that can be passed to the
 *		NIC for GSO
 *
 *	@dcbnl_ops: Data Center Bridging netlink ops
 *	@num_tc: Number of traffic classes in the net device
 *	@tc_to_txq: XXX: need comments on this one
 *	@prio_tc_map: XXX: need comments on this one
 *
 *	@fcoe_ddp_xid: Max exchange id for FCoE LRO by ddp
 *
 *	@priomap: XXX: need comments on this one
 *	@phydev: Physical device may attach itself
 *		for hardware timestamping
 *
 *	@qdisc_tx_busylock: XXX: need comments on this one
 *
 *	@group: The group, that the device belongs to
 *	@pm_qos_req: Power Management QoS object
 *
 *	FIXME: cleanup struct net_device such that network protocol info
 *	moves out.
 */

struct net_device {
	char name[IFNAMSIZ];
	struct hlist_node name_hlist;

	/*
	 * I/O specific fields
	 * FIXME: Merge these and struct ifmap into one
	 */
	unsigned long mem_end;
	unsigned long mem_start;
	unsigned long base_addr;

	/*
	 * Some hardware also needs these fields (state, dev_list,
	 * napi_list, unreg_list, close_list) but they are not
	 * part of the usual set specified in Space.c.
	 */

	unsigned long state;

	struct list_head dev_list;
	struct list_head napi_list;
	struct list_head unreg_list;
	struct list_head close_list;

	struct {
		struct list_head upper;
		struct list_head lower;
	} adj_list;

	struct {
		struct list_head upper;
		struct list_head lower;
	} all_adj_list;

	netdev_features_t features;
	netdev_features_t hw_features;
	netdev_features_t wanted_features;
	netdev_features_t vlan_features;
	netdev_features_t hw_enc_features;
	netdev_features_t mpls_features;

	struct net_device_stats stats;

	atomic_long_t rx_dropped;
	atomic_long_t tx_dropped;

	atomic_t carrier_changes;

#ifdef CONFIG_WIRELESS_EXT
	const struct iw_handler_def *wireless_handlers;
	struct iw_public_data *wireless_data;
#endif
	const struct net_device_ops *netdev_ops;
	const struct ethtool_ops *ethtool_ops;
	const struct forwarding_accel_ops *fwd_ops;

	const struct header_ops *header_ops;

	unsigned int priv_flags;

	unsigned short gflags;
	unsigned short padded;

	unsigned char operstate;
	unsigned char link_mode;

	unsigned char if_port;

	unsigned short type;
	unsigned short hard_header_len;

	unsigned short needed_headroom;
	unsigned short needed_tailroom;

	/* Interface address info. */
	unsigned char perm_addr[MAX_ADDR_LEN];
	unsigned char addr_assign_type;
	unsigned char addr_len;
	unsigned short neigh_priv_len;
	unsigned short dev_id;
	unsigned short dev_port;
	spinlock_t addr_list_lock;
	struct netdev_hw_addr_list uc;
	struct netdev_hw_addr_list mc;
	struct netdev_hw_addr_list dev_addrs;

#ifdef CONFIG_SYSFS
	struct kset *queues_kset;
#endif

	unsigned char name_assign_type;

	unsigned int promiscuity;
	unsigned int allmulti;


	/* Protocol specific pointers */

#if IS_ENABLED(CONFIG_VLAN_8021Q)
	struct vlan_info __rcu *vlan_info;
#endif
#if IS_ENABLED(CONFIG_NET_DSA)
	struct dsa_switch_tree *dsa_ptr;
#endif
#if IS_ENABLED(CONFIG_TIPC)
	struct tipc_bearer __rcu *tipc_ptr;
#endif
	struct in_device __rcu *ip_ptr;
	struct dn_dev __rcu *dn_ptr;
	struct inet6_dev __rcu *ip6_ptr;
	struct wireless_dev *ieee80211_ptr;
	struct wpan_dev *ieee802154_ptr;

/*
 * Cache lines mostly used on receive path (including eth_type_trans())
 */
	unsigned long last_rx;

	/* Interface address info used in eth_type_trans() */
	unsigned char *dev_addr;

	struct netdev_rx_queue *_rx;

	unsigned int num_rx_queues;
	unsigned int real_num_rx_queues;

	unsigned long gro_flush_timeout;
	rx_handler_func_t __rcu *rx_handler;
	void __rcu *rx_handler_data;

	struct netdev_queue __rcu *ingress_queue;
	unsigned char broadcast[MAX_ADDR_LEN];

/*
 * Cache lines mostly used on transmit path
 */
	struct netdev_queue *_tx ____cacheline_aligned_in_smp;
	unsigned int num_tx_queues;
	unsigned int real_num_tx_queues;
	struct Qdisc *qdisc;
	unsigned long tx_queue_len;
	spinlock_t tx_global_lock;

#ifdef CONFIG_XPS
	struct xps_dev_maps __rcu *xps_maps;
#endif
#ifdef CONFIG_RFS_ACCEL
	struct cpu_rmap *rx_cpu_rmap;
#endif

	/* These may be needed for future network-power-down code. */

	/*
	 * trans_start here is expensive for high speed devices on SMP,
	 * please use netdev_queue->trans_start instead.
	 */
	unsigned long trans_start;

	struct timer_list watchdog_timer;

	int __percpu *pcpu_refcnt;
	struct list_head todo_list;

	struct hlist_node index_hlist;
	struct list_head link_watch_list;

	enum { NETREG_UNINITIALIZED=0,
	       NETREG_REGISTERED,	/* completed register_netdevice */
	       NETREG_UNREGISTERING,	/* called unregister_netdevice */
	       NETREG_UNREGISTERED,	/* completed unregister todo */
	       NETREG_RELEASED,		/* called free_netdev */
	       NETREG_DUMMY,		/* dummy device for NAPI poll */
	} reg_state:8;

	enum {
		RTNL_LINK_INITIALIZED,
		RTNL_LINK_INITIALIZING,
	} rtnl_link_state:16;

	void (*destructor)(struct net_device *dev);

#ifdef CONFIG_NETPOLL
	struct netpoll_info __rcu *npinfo;
#endif

#ifdef CONFIG_NET_NS
	struct net *nd_net;
#endif

	/* mid-layer private */
	union {
		void *ml_priv;
		struct pcpu_lstats __percpu *lstats;
		struct pcpu_sw_netstats __percpu *tstats;
		struct pcpu_dstats __percpu *dstats;
		struct pcpu_vstats __percpu *vstats;
	};

	struct garp_port __rcu *garp_port;
	struct mrp_port __rcu *mrp_port;

	struct device dev;
	const struct attribute_group *sysfs_groups[4];
	const struct attribute_group *sysfs_rx_queue_group;

	const struct rtnl_link_ops *rtnl_link_ops;

	/* for setting kernel sock attribute on TCP connection setup */
#define GSO_MAX_SIZE 65536
	unsigned int gso_max_size;
#define GSO_MAX_SEGS 65535

#ifdef CONFIG_DCB
	const struct dcbnl_rtnl_ops *dcbnl_ops;
#endif
	u8 num_tc;
	struct netdev_tc_txq tc_to_txq[TC_MAX_QUEUE];
	u8 prio_tc_map[TC_BITMASK + 1];

#if IS_ENABLED(CONFIG_FCOE)
	unsigned int fcoe_ddp_xid;
#endif
#if IS_ENABLED(CONFIG_CGROUP_NET_PRIO)
	struct netprio_map __rcu *priomap;
#endif
	struct phy_device *phydev;
	struct lock_class_key *qdisc_tx_busylock;

	struct pm_qos_request pm_qos_req;
};
#define to_net_dev(d) container_of(d, struct net_device, dev)

#define NETDEV_ALIGN 32
static inline
int netdev_get_prio_tc_map(const struct net_device *dev, u32 prio)
{
	return dev->prio_tc_map[prio & TC_BITMASK];
}

static inline
int netdev_set_prio_tc_map(struct net_device *dev, u8 prio, u8 tc)
{
	if (tc >= dev->num_tc)
		return -EINVAL;

	dev->prio_tc_map[prio & TC_BITMASK] = tc & TC_BITMASK;
	return 0;
}

static inline
void netdev_reset_tc(struct net_device *dev)
{
	dev->num_tc = 0;
	memset(dev->tc_to_txq, 0, sizeof(dev->tc_to_txq));
	memset(dev->prio_tc_map, 0, sizeof(dev->prio_tc_map));
}

static inline
int netdev_set_tc_queue(struct net_device *dev, u8 tc, u16 count, u16 offset)
{
	if (tc >= dev->num_tc)
		return -EINVAL;

	dev->tc_to_txq[tc].count = count;
	dev->tc_to_txq[tc].offset = offset;
	return 0;
}

static inline
int netdev_set_num_tc(struct net_device *dev, u8 num_tc)
{
	if (num_tc > TC_MAX_QUEUE)
		return -EINVAL;

	dev->num_tc = num_tc;
	return 0;
}

static inline
int netdev_get_num_tc(struct net_device *dev)
{
	return dev->num_tc;
}
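/*
 * Illustration only: a hypothetical 8-queue driver splitting its queues
 * across two traffic classes and steering two priorities:
 *
 *	netdev_set_num_tc(dev, 2);
 *	netdev_set_tc_queue(dev, 0, 4, 0);	// TC 0: queues 0-3
 *	netdev_set_tc_queue(dev, 1, 4, 4);	// TC 1: queues 4-7
 *	netdev_set_prio_tc_map(dev, 0, 0);	// priority 0 -> TC 0
 *	netdev_set_prio_tc_map(dev, 5, 1);	// priority 5 -> TC 1
 */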
static inline
struct netdev_queue *netdev_get_tx_queue(const struct net_device *dev,
					 unsigned int index)
{
	return &dev->_tx[index];
}

static inline struct netdev_queue *skb_get_tx_queue(const struct net_device *dev,
						    const struct sk_buff *skb)
{
	return netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
}

static inline void netdev_for_each_tx_queue(struct net_device *dev,
					    void (*f)(struct net_device *,
						      struct netdev_queue *,
						      void *),
					    void *arg)
{
	unsigned int i;

	for (i = 0; i < dev->num_tx_queues; i++)
		f(dev, &dev->_tx[i], arg);
}

struct netdev_queue *netdev_pick_tx(struct net_device *dev,
				    struct sk_buff *skb,
				    void *accel_priv);
/*
 * Net namespace inlines
 */
static inline
struct net *dev_net(const struct net_device *dev)
{
	return read_pnet(&dev->nd_net);
}

static inline
void dev_net_set(struct net_device *dev, struct net *net)
{
#ifdef CONFIG_NET_NS
	release_net(dev->nd_net);
	dev->nd_net = hold_net(net);
#endif
}

static inline bool netdev_uses_dsa(struct net_device *dev)
{
#if IS_ENABLED(CONFIG_NET_DSA)
	if (dev->dsa_ptr != NULL)
		return dsa_uses_tagged_protocol(dev->dsa_ptr);
#endif
	return false;
}
/**
 *	netdev_priv - access network device private data
 *	@dev: network device
 *
 * Get network device private data
 */
static inline void *netdev_priv(const struct net_device *dev)
{
	return (char *)dev + ALIGN(sizeof(struct net_device), NETDEV_ALIGN);
}
/* Set the sysfs physical device reference for the network logical device;
 * if set prior to registration, a symlink is created during initialization.
 */
1863 #define SET_NETDEV_DEV(net, pdev) ((net)->dev.parent = (pdev))
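/*
 * Example (editorial): a hedged sketch of the usual allocation pattern; the
 * private struct is reserved at alloc time and retrieved with netdev_priv().
 * foo_priv and the PCI context are hypothetical; alloc_etherdev() lives in
 * <linux/etherdevice.h>.
 *
 *	struct foo_priv {
 *		void __iomem *regs;
 *	};
 *
 *	static int foo_probe(struct pci_dev *pdev)
 *	{
 *		struct net_device *dev = alloc_etherdev(sizeof(struct foo_priv));
 *		struct foo_priv *priv;
 *
 *		if (!dev)
 *			return -ENOMEM;
 *		priv = netdev_priv(dev);
 *		SET_NETDEV_DEV(dev, &pdev->dev);	// sysfs "device" symlink
 *		return register_netdev(dev);
 *	}
 */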
1865 /* Set the sysfs device type for the network logical device to allow
1866 * fine-grained identification of different network device types. For
 * example Ethernet, Wireless LAN, Bluetooth, WiMAX etc.
1869 #define SET_NETDEV_DEVTYPE(net, devtype) ((net)->dev.type = (devtype))
/* Default NAPI poll() weight.
 * Device drivers are strongly advised not to use a bigger value.
 */
1874 #define NAPI_POLL_WEIGHT 64
1877 * netif_napi_add - initialize a napi context
1878 * @dev: network device
1879 * @napi: napi context
1880 * @poll: polling function
1881 * @weight: default weight
1883 * netif_napi_add() must be used to initialize a napi context prior to calling
1884 * *any* of the other napi related functions.
1886 void netif_napi_add(struct net_device *dev, struct napi_struct *napi,
1887 int (*poll)(struct napi_struct *, int), int weight);
1890 * netif_napi_del - remove a napi context
1891 * @napi: napi context
1893 * netif_napi_del() removes a napi context from the network device napi list
1895 void netif_napi_del(struct napi_struct *napi);
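/*
 * Example (editorial): a minimal NAPI poll skeleton. foo_clean_rx() and
 * foo_enable_irq() are hypothetical driver helpers; the rest is the standard
 * contract: consume at most @budget packets, and only re-enable interrupts
 * after napi_complete().
 *
 *	static int foo_poll(struct napi_struct *napi, int budget)
 *	{
 *		struct foo_priv *priv = container_of(napi, struct foo_priv, napi);
 *		int work_done = foo_clean_rx(priv, budget);
 *
 *		if (work_done < budget) {
 *			napi_complete(napi);
 *			foo_enable_irq(priv);
 *		}
 *		return work_done;
 *	}
 *
 *	// at init time:
 *	netif_napi_add(dev, &priv->napi, foo_poll, NAPI_POLL_WEIGHT);
 */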
struct napi_gro_cb {
	/* Virtual address of skb_shinfo(skb)->frags[0].page + offset. */
	void	*frag0;
	/* Length of frag0. */
	unsigned int frag0_len;
	/* This indicates where we are processing relative to skb->data. */
	int	data_offset;
	/* This is non-zero if the packet cannot be merged with the new skb. */
	u16	flush;
	/* Save the IP ID here and check when we get to the transport layer */
	u16	flush_id;
	/* Number of segments aggregated. */
	u16	count;
	/* This is non-zero if the packet may be of the same flow. */
	u8	same_flow;
	/* Free the skb? */
	u8	free;
#define NAPI_GRO_FREE		  1
#define NAPI_GRO_FREE_STOLEN_HEAD 2
	/* jiffies when first packet was created/queued */
	unsigned long age;
	/* Used in ipv6_gro_receive() and foo-over-udp */
	u16	proto;
	/* Used in udp_gro_receive */
	u8	udp_mark:1;
	/* GRO checksum is valid */
	u8	csum_valid:1;
	/* Number of checksums via CHECKSUM_UNNECESSARY */
	u8	csum_cnt:3;
	/* Used in foo-over-udp, set in udp[46]_gro_receive */
	u8	is_ipv6:1;
	/* used to support CHECKSUM_COMPLETE for tunneling protocols */
	__wsum	csum;
	/* used in skb_gro_receive() slow path */
	struct sk_buff *last;
};
1949 #define NAPI_GRO_CB(skb) ((struct napi_gro_cb *)(skb)->cb)
1951 struct packet_type {
1952 __be16 type; /* This is really htons(ether_type). */
1953 struct net_device *dev; /* NULL is wildcarded here */
1954 int (*func) (struct sk_buff *,
1955 struct net_device *,
1956 struct packet_type *,
1957 struct net_device *);
	bool			(*id_match)(struct packet_type *ptype,
					    struct sock *sk);
1960 void *af_packet_priv;
1961 struct list_head list;
1964 struct offload_callbacks {
1965 struct sk_buff *(*gso_segment)(struct sk_buff *skb,
1966 netdev_features_t features);
1967 struct sk_buff **(*gro_receive)(struct sk_buff **head,
1968 struct sk_buff *skb);
1969 int (*gro_complete)(struct sk_buff *skb, int nhoff);
1972 struct packet_offload {
1973 __be16 type; /* This is really htons(ether_type). */
1974 struct offload_callbacks callbacks;
1975 struct list_head list;
struct udp_offload {
	__be16			 port;
	u8			 ipproto;
	struct offload_callbacks callbacks;
};
1984 /* often modified stats are per cpu, other are shared (netdev->stats) */
struct pcpu_sw_netstats {
	u64     rx_packets;
	u64     rx_bytes;
	u64     tx_packets;
	u64     tx_bytes;
	struct u64_stats_sync   syncp;
};
#define netdev_alloc_pcpu_stats(type)				\
({								\
	typeof(type) __percpu *pcpu_stats = alloc_percpu(type); \
	if (pcpu_stats)	{					\
		int i;						\
		for_each_possible_cpu(i) {			\
			typeof(type) *stat;			\
			stat = per_cpu_ptr(pcpu_stats, i);	\
			u64_stats_init(&stat->syncp);		\
		}						\
	}							\
	pcpu_stats;						\
})
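/*
 * Example (editorial): allocating and updating per-cpu SW stats with the
 * macro above; dev->tstats is one of the union members in struct net_device.
 *
 *	dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
 *	if (!dev->tstats)
 *		return -ENOMEM;
 *
 *	// on the RX path:
 *	struct pcpu_sw_netstats *tstats = this_cpu_ptr(dev->tstats);
 *
 *	u64_stats_update_begin(&tstats->syncp);
 *	tstats->rx_packets++;
 *	tstats->rx_bytes += skb->len;
 *	u64_stats_update_end(&tstats->syncp);
 */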
2007 #include <linux/notifier.h>
/* netdevice notifier chain. Please remember to update the rtnetlink
 * notification exclusion list in rtnetlink_event() when adding new
 * types.
 */
2013 #define NETDEV_UP 0x0001 /* For now you can't veto a device up/down */
2014 #define NETDEV_DOWN 0x0002
#define NETDEV_REBOOT	0x0003	/* Tell a protocol stack a network interface
				   detected a hardware crash and restarted
				   - we can use this eg to kick tcp sessions
				   once done */
2019 #define NETDEV_CHANGE 0x0004 /* Notify device state change */
2020 #define NETDEV_REGISTER 0x0005
2021 #define NETDEV_UNREGISTER 0x0006
2022 #define NETDEV_CHANGEMTU 0x0007 /* notify after mtu change happened */
2023 #define NETDEV_CHANGEADDR 0x0008
2024 #define NETDEV_GOING_DOWN 0x0009
2025 #define NETDEV_CHANGENAME 0x000A
2026 #define NETDEV_FEAT_CHANGE 0x000B
2027 #define NETDEV_BONDING_FAILOVER 0x000C
2028 #define NETDEV_PRE_UP 0x000D
2029 #define NETDEV_PRE_TYPE_CHANGE 0x000E
2030 #define NETDEV_POST_TYPE_CHANGE 0x000F
2031 #define NETDEV_POST_INIT 0x0010
2032 #define NETDEV_UNREGISTER_FINAL 0x0011
2033 #define NETDEV_RELEASE 0x0012
2034 #define NETDEV_NOTIFY_PEERS 0x0013
2035 #define NETDEV_JOIN 0x0014
2036 #define NETDEV_CHANGEUPPER 0x0015
2037 #define NETDEV_RESEND_IGMP 0x0016
#define NETDEV_PRECHANGEMTU	0x0017	/* notify before mtu change is applied */
2039 #define NETDEV_CHANGEINFODATA 0x0018
2041 int register_netdevice_notifier(struct notifier_block *nb);
2042 int unregister_netdevice_notifier(struct notifier_block *nb);
2044 struct netdev_notifier_info {
2045 struct net_device *dev;
2048 struct netdev_notifier_change_info {
2049 struct netdev_notifier_info info; /* must be first */
2050 unsigned int flags_changed;
static inline void netdev_notifier_info_init(struct netdev_notifier_info *info,
					     struct net_device *dev)
{
	info->dev = dev;
}

static inline struct net_device *
netdev_notifier_info_to_dev(const struct netdev_notifier_info *info)
{
	return info->dev;
}
2065 int call_netdevice_notifiers(unsigned long val, struct net_device *dev);
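/*
 * Example (editorial): a hedged sketch of a netdevice notifier; the handler
 * and naming are hypothetical, the API calls are the ones declared above.
 *
 *	static int foo_netdev_event(struct notifier_block *nb,
 *				    unsigned long event, void *ptr)
 *	{
 *		struct net_device *dev = netdev_notifier_info_to_dev(ptr);
 *
 *		if (event == NETDEV_UP)
 *			pr_info("%s is up\n", dev->name);
 *		return NOTIFY_DONE;
 *	}
 *
 *	static struct notifier_block foo_nb = {
 *		.notifier_call = foo_netdev_event,
 *	};
 *
 *	// module init/exit:
 *	register_netdevice_notifier(&foo_nb);
 *	unregister_netdevice_notifier(&foo_nb);
 */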
2068 extern rwlock_t dev_base_lock; /* Device list lock */
2070 #define for_each_netdev(net, d) \
2071 list_for_each_entry(d, &(net)->dev_base_head, dev_list)
2072 #define for_each_netdev_reverse(net, d) \
2073 list_for_each_entry_reverse(d, &(net)->dev_base_head, dev_list)
2074 #define for_each_netdev_rcu(net, d) \
2075 list_for_each_entry_rcu(d, &(net)->dev_base_head, dev_list)
2076 #define for_each_netdev_safe(net, d, n) \
2077 list_for_each_entry_safe(d, n, &(net)->dev_base_head, dev_list)
2078 #define for_each_netdev_continue(net, d) \
2079 list_for_each_entry_continue(d, &(net)->dev_base_head, dev_list)
2080 #define for_each_netdev_continue_rcu(net, d) \
2081 list_for_each_entry_continue_rcu(d, &(net)->dev_base_head, dev_list)
2082 #define for_each_netdev_in_bond_rcu(bond, slave) \
2083 for_each_netdev_rcu(&init_net, slave) \
2084 if (netdev_master_upper_dev_get_rcu(slave) == bond)
2085 #define net_device_entry(lh) list_entry(lh, struct net_device, dev_list)
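/*
 * Example (editorial): walking the device list of a namespace with the RCU
 * variant; the _rcu iterators must run inside an RCU read-side section.
 *
 *	struct net_device *dev;
 *
 *	rcu_read_lock();
 *	for_each_netdev_rcu(&init_net, dev) {
 *		if (netif_running(dev))
 *			pr_info("%s is running\n", dev->name);
 *	}
 *	rcu_read_unlock();
 */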
static inline struct net_device *next_net_device(struct net_device *dev)
{
	struct list_head *lh;
	struct net *net;

	net = dev_net(dev);
	lh = dev->dev_list.next;
	return lh == &net->dev_base_head ? NULL : net_device_entry(lh);
}

static inline struct net_device *next_net_device_rcu(struct net_device *dev)
{
	struct list_head *lh;
	struct net *net;

	net = dev_net(dev);
	lh = rcu_dereference(list_next_rcu(&dev->dev_list));
	return lh == &net->dev_base_head ? NULL : net_device_entry(lh);
}
2107 static inline struct net_device *first_net_device(struct net *net)
2109 return list_empty(&net->dev_base_head) ? NULL :
2110 net_device_entry(net->dev_base_head.next);
2113 static inline struct net_device *first_net_device_rcu(struct net *net)
2115 struct list_head *lh = rcu_dereference(list_next_rcu(&net->dev_base_head));
2117 return lh == &net->dev_base_head ? NULL : net_device_entry(lh);
2120 int netdev_boot_setup_check(struct net_device *dev);
2121 unsigned long netdev_boot_base(const char *prefix, int unit);
2122 struct net_device *dev_getbyhwaddr_rcu(struct net *net, unsigned short type,
2123 const char *hwaddr);
2124 struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type);
2125 struct net_device *__dev_getfirstbyhwtype(struct net *net, unsigned short type);
2126 void dev_add_pack(struct packet_type *pt);
2127 void dev_remove_pack(struct packet_type *pt);
2128 void __dev_remove_pack(struct packet_type *pt);
2129 void dev_add_offload(struct packet_offload *po);
2130 void dev_remove_offload(struct packet_offload *po);
2132 struct net_device *__dev_get_by_flags(struct net *net, unsigned short flags,
2133 unsigned short mask);
2134 struct net_device *dev_get_by_name(struct net *net, const char *name);
2135 struct net_device *dev_get_by_name_rcu(struct net *net, const char *name);
2136 struct net_device *__dev_get_by_name(struct net *net, const char *name);
2137 int dev_alloc_name(struct net_device *dev, const char *name);
2138 int dev_open(struct net_device *dev);
2139 int dev_close(struct net_device *dev);
2140 void dev_disable_lro(struct net_device *dev);
2141 int dev_loopback_xmit(struct sk_buff *newskb);
2142 int dev_queue_xmit(struct sk_buff *skb);
2143 int dev_queue_xmit_accel(struct sk_buff *skb, void *accel_priv);
2144 int register_netdevice(struct net_device *dev);
2145 void unregister_netdevice_queue(struct net_device *dev, struct list_head *head);
2146 void unregister_netdevice_many(struct list_head *head);
2147 static inline void unregister_netdevice(struct net_device *dev)
2149 unregister_netdevice_queue(dev, NULL);
2152 int netdev_refcnt_read(const struct net_device *dev);
2153 void free_netdev(struct net_device *dev);
2154 void netdev_freemem(struct net_device *dev);
2155 void synchronize_net(void);
2156 int init_dummy_netdev(struct net_device *dev);
2158 struct net_device *dev_get_by_index(struct net *net, int ifindex);
2159 struct net_device *__dev_get_by_index(struct net *net, int ifindex);
2160 struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex);
2161 int netdev_get_name(struct net *net, char *name, int ifindex);
2162 int dev_restart(struct net_device *dev);
2163 int skb_gro_receive(struct sk_buff **head, struct sk_buff *skb);
2165 static inline unsigned int skb_gro_offset(const struct sk_buff *skb)
2167 return NAPI_GRO_CB(skb)->data_offset;
2170 static inline unsigned int skb_gro_len(const struct sk_buff *skb)
2172 return skb->len - NAPI_GRO_CB(skb)->data_offset;
2175 static inline void skb_gro_pull(struct sk_buff *skb, unsigned int len)
2177 NAPI_GRO_CB(skb)->data_offset += len;
2180 static inline void *skb_gro_header_fast(struct sk_buff *skb,
2181 unsigned int offset)
2183 return NAPI_GRO_CB(skb)->frag0 + offset;
2186 static inline int skb_gro_header_hard(struct sk_buff *skb, unsigned int hlen)
2188 return NAPI_GRO_CB(skb)->frag0_len < hlen;
static inline void *skb_gro_header_slow(struct sk_buff *skb, unsigned int hlen,
					unsigned int offset)
{
	if (!pskb_may_pull(skb, hlen))
		return NULL;

	NAPI_GRO_CB(skb)->frag0 = NULL;
	NAPI_GRO_CB(skb)->frag0_len = 0;
	return skb->data + offset;
}
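/*
 * Example (editorial): the canonical header-access pattern in a gro_receive
 * handler, as used by the in-tree protocol offloads; "struct foohdr" stands
 * in for the protocol header being parsed.
 *
 *	unsigned int off  = skb_gro_offset(skb);
 *	unsigned int hlen = off + sizeof(struct foohdr);
 *	struct foohdr *fh = skb_gro_header_fast(skb, off);
 *
 *	if (skb_gro_header_hard(skb, hlen)) {
 *		fh = skb_gro_header_slow(skb, hlen, off);
 *		if (unlikely(!fh))
 *			goto out;	// header not available, punt
 *	}
 */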
2202 static inline void *skb_gro_network_header(struct sk_buff *skb)
2204 return (NAPI_GRO_CB(skb)->frag0 ?: skb->data) +
2205 skb_network_offset(skb);
2208 static inline void skb_gro_postpull_rcsum(struct sk_buff *skb,
2209 const void *start, unsigned int len)
2211 if (NAPI_GRO_CB(skb)->csum_valid)
2212 NAPI_GRO_CB(skb)->csum = csum_sub(NAPI_GRO_CB(skb)->csum,
2213 csum_partial(start, len, 0));
2216 /* GRO checksum functions. These are logical equivalents of the normal
2217 * checksum functions (in skbuff.h) except that they operate on the GRO
2218 * offsets and fields in sk_buff.
2221 __sum16 __skb_gro_checksum_complete(struct sk_buff *skb);
2223 static inline bool __skb_gro_checksum_validate_needed(struct sk_buff *skb,
2227 return (skb->ip_summed != CHECKSUM_PARTIAL &&
2228 NAPI_GRO_CB(skb)->csum_cnt == 0 &&
2229 (!zero_okay || check));
static inline __sum16 __skb_gro_checksum_validate_complete(struct sk_buff *skb,
							   __wsum psum)
{
	if (NAPI_GRO_CB(skb)->csum_valid &&
	    !csum_fold(csum_add(psum, NAPI_GRO_CB(skb)->csum)))
		return 0;

	NAPI_GRO_CB(skb)->csum = psum;

	return __skb_gro_checksum_complete(skb);
}
2244 static inline void skb_gro_incr_csum_unnecessary(struct sk_buff *skb)
2246 if (NAPI_GRO_CB(skb)->csum_cnt > 0) {
2247 /* Consume a checksum from CHECKSUM_UNNECESSARY */
2248 NAPI_GRO_CB(skb)->csum_cnt--;
2250 /* Update skb for CHECKSUM_UNNECESSARY and csum_level when we
2251 * verified a new top level checksum or an encapsulated one
		 * during GRO. This saves work if we fall back to the normal path.
		 */
2254 __skb_incr_checksum_unnecessary(skb);
#define __skb_gro_checksum_validate(skb, proto, zero_okay, check,	\
				    compute_pseudo)			\
({									\
	__sum16 __ret = 0;						\
	if (__skb_gro_checksum_validate_needed(skb, zero_okay, check))	\
		__ret = __skb_gro_checksum_validate_complete(skb,	\
				compute_pseudo(skb, proto));		\
	if (__ret)							\
		__skb_mark_checksum_bad(skb);				\
	else								\
		skb_gro_incr_csum_unnecessary(skb);			\
	__ret;								\
})
2272 #define skb_gro_checksum_validate(skb, proto, compute_pseudo) \
2273 __skb_gro_checksum_validate(skb, proto, false, 0, compute_pseudo)
2275 #define skb_gro_checksum_validate_zero_check(skb, proto, check, \
2277 __skb_gro_checksum_validate(skb, proto, true, check, compute_pseudo)
2279 #define skb_gro_checksum_simple_validate(skb) \
2280 __skb_gro_checksum_validate(skb, 0, false, 0, null_compute_pseudo)
2282 static inline bool __skb_gro_checksum_convert_check(struct sk_buff *skb)
2284 return (NAPI_GRO_CB(skb)->csum_cnt == 0 &&
2285 !NAPI_GRO_CB(skb)->csum_valid);
2288 static inline void __skb_gro_checksum_convert(struct sk_buff *skb,
2289 __sum16 check, __wsum pseudo)
2291 NAPI_GRO_CB(skb)->csum = ~pseudo;
2292 NAPI_GRO_CB(skb)->csum_valid = 1;
#define skb_gro_checksum_try_convert(skb, proto, check, compute_pseudo)	\
do {									\
	if (__skb_gro_checksum_convert_check(skb))			\
		__skb_gro_checksum_convert(skb, check,			\
					   compute_pseudo(skb, proto));	\
} while (0)
static inline int dev_hard_header(struct sk_buff *skb, struct net_device *dev,
				  unsigned short type,
				  const void *daddr, const void *saddr,
				  unsigned int len)
{
	if (!dev->header_ops || !dev->header_ops->create)
		return 0;

	return dev->header_ops->create(skb, dev, type, daddr, saddr, len);
}
static inline int dev_parse_header(const struct sk_buff *skb,
				   unsigned char *haddr)
{
	const struct net_device *dev = skb->dev;

	if (!dev->header_ops || !dev->header_ops->parse)
		return 0;
	return dev->header_ops->parse(skb, haddr);
}
static inline int dev_rebuild_header(struct sk_buff *skb)
{
	const struct net_device *dev = skb->dev;

	if (!dev->header_ops || !dev->header_ops->rebuild)
		return 0;
	return dev->header_ops->rebuild(skb);
}
2332 typedef int gifconf_func_t(struct net_device * dev, char __user * bufptr, int len);
2333 int register_gifconf(unsigned int family, gifconf_func_t *gifconf);
2334 static inline int unregister_gifconf(unsigned int family)
2336 return register_gifconf(family, NULL);
2339 #ifdef CONFIG_NET_FLOW_LIMIT
#define FLOW_LIMIT_HISTORY	(1 << 7)  /* must be a power of 2 and small
					   * enough not to overflow the
					   * per-bucket counters */
struct sd_flow_limit {
	u64			count;
	unsigned int		num_buckets;
	unsigned int		history_head;
	u16			history[FLOW_LIMIT_HISTORY];
	u8			buckets[];
};
2349 extern int netdev_flow_limit_table_len;
2350 #endif /* CONFIG_NET_FLOW_LIMIT */
2353 * Incoming packets are placed on per-cpu queues
2355 struct softnet_data {
2356 struct list_head poll_list;
2357 struct sk_buff_head process_queue;
2360 unsigned int processed;
2361 unsigned int time_squeeze;
2362 unsigned int cpu_collision;
2363 unsigned int received_rps;
2365 struct softnet_data *rps_ipi_list;
2367 #ifdef CONFIG_NET_FLOW_LIMIT
2368 struct sd_flow_limit __rcu *flow_limit;
2370 struct Qdisc *output_queue;
2371 struct Qdisc **output_queue_tailp;
2372 struct sk_buff *completion_queue;
2375 /* Elements below can be accessed between CPUs for RPS */
2376 struct call_single_data csd ____cacheline_aligned_in_smp;
2377 struct softnet_data *rps_ipi_next;
2379 unsigned int input_queue_head;
2380 unsigned int input_queue_tail;
2382 unsigned int dropped;
2383 struct sk_buff_head input_pkt_queue;
2384 struct napi_struct backlog;
static inline void input_queue_head_incr(struct softnet_data *sd)
{
#ifdef CONFIG_RPS
	sd->input_queue_head++;
#endif
}

static inline void input_queue_tail_incr_save(struct softnet_data *sd,
					      unsigned int *qtail)
{
#ifdef CONFIG_RPS
	*qtail = ++sd->input_queue_tail;
#endif
}
2403 DECLARE_PER_CPU_ALIGNED(struct softnet_data, softnet_data);
2405 void __netif_schedule(struct Qdisc *q);
2406 void netif_schedule_queue(struct netdev_queue *txq);
2408 static inline void netif_tx_schedule_all(struct net_device *dev)
2412 for (i = 0; i < dev->num_tx_queues; i++)
2413 netif_schedule_queue(netdev_get_tx_queue(dev, i));
2416 static inline void netif_tx_start_queue(struct netdev_queue *dev_queue)
2418 clear_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state);
2422 * netif_start_queue - allow transmit
2423 * @dev: network device
2425 * Allow upper layers to call the device hard_start_xmit routine.
2427 static inline void netif_start_queue(struct net_device *dev)
2429 netif_tx_start_queue(netdev_get_tx_queue(dev, 0));
2432 static inline void netif_tx_start_all_queues(struct net_device *dev)
2436 for (i = 0; i < dev->num_tx_queues; i++) {
2437 struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
2438 netif_tx_start_queue(txq);
2442 void netif_tx_wake_queue(struct netdev_queue *dev_queue);
2445 * netif_wake_queue - restart transmit
2446 * @dev: network device
2448 * Allow upper layers to call the device hard_start_xmit routine.
2449 * Used for flow control when transmit resources are available.
2451 static inline void netif_wake_queue(struct net_device *dev)
2453 netif_tx_wake_queue(netdev_get_tx_queue(dev, 0));
2456 static inline void netif_tx_wake_all_queues(struct net_device *dev)
2460 for (i = 0; i < dev->num_tx_queues; i++) {
2461 struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
2462 netif_tx_wake_queue(txq);
static inline void netif_tx_stop_queue(struct netdev_queue *dev_queue)
{
	if (WARN_ON(!dev_queue)) {
		pr_info("netif_stop_queue() cannot be called before register_netdev()\n");
		return;
	}
	set_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state);
}
 *	netif_stop_queue - stop the transmit queue
 *	@dev: network device
 *
 *	Stop upper layers from calling the device hard_start_xmit routine.
 *	Used for flow control when transmit resources are unavailable.
2482 static inline void netif_stop_queue(struct net_device *dev)
2484 netif_tx_stop_queue(netdev_get_tx_queue(dev, 0));
2487 static inline void netif_tx_stop_all_queues(struct net_device *dev)
2491 for (i = 0; i < dev->num_tx_queues; i++) {
2492 struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
2493 netif_tx_stop_queue(txq);
2497 static inline bool netif_tx_queue_stopped(const struct netdev_queue *dev_queue)
2499 return test_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state);
 *	netif_queue_stopped - test if the transmit queue is flow-blocked
 *	@dev: network device
 *
 *	Test if the transmit queue on the device is currently unable to send.
2508 static inline bool netif_queue_stopped(const struct net_device *dev)
2510 return netif_tx_queue_stopped(netdev_get_tx_queue(dev, 0));
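/*
 * Example (editorial): the classic single-queue flow-control pattern; the
 * ring-state helpers are hypothetical, the netif_* calls are the ones above.
 *
 *	static netdev_tx_t foo_start_xmit(struct sk_buff *skb,
 *					  struct net_device *dev)
 *	{
 *		struct foo_priv *priv = netdev_priv(dev);
 *
 *		if (foo_tx_ring_full(priv)) {
 *			netif_stop_queue(dev);
 *			return NETDEV_TX_BUSY;
 *		}
 *		foo_post_to_ring(priv, skb);
 *		return NETDEV_TX_OK;
 *	}
 *
 *	// in the TX completion handler:
 *	if (netif_queue_stopped(dev) && foo_tx_ring_has_room(priv))
 *		netif_wake_queue(dev);
 */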
2513 static inline bool netif_xmit_stopped(const struct netdev_queue *dev_queue)
2515 return dev_queue->state & QUEUE_STATE_ANY_XOFF;
2519 netif_xmit_frozen_or_stopped(const struct netdev_queue *dev_queue)
2521 return dev_queue->state & QUEUE_STATE_ANY_XOFF_OR_FROZEN;
2525 netif_xmit_frozen_or_drv_stopped(const struct netdev_queue *dev_queue)
2527 return dev_queue->state & QUEUE_STATE_DRV_XOFF_OR_FROZEN;
2531 * netdev_txq_bql_enqueue_prefetchw - prefetch bql data for write
2532 * @dev_queue: pointer to transmit queue
2534 * BQL enabled drivers might use this helper in their ndo_start_xmit(),
2535 * to give appropriate hint to the cpu.
static inline void netdev_txq_bql_enqueue_prefetchw(struct netdev_queue *dev_queue)
{
#ifdef CONFIG_BQL
	prefetchw(&dev_queue->dql.num_queued);
#endif
}
2545 * netdev_txq_bql_complete_prefetchw - prefetch bql data for write
2546 * @dev_queue: pointer to transmit queue
2548 * BQL enabled drivers might use this helper in their TX completion path,
2549 * to give appropriate hint to the cpu.
static inline void netdev_txq_bql_complete_prefetchw(struct netdev_queue *dev_queue)
{
#ifdef CONFIG_BQL
	prefetchw(&dev_queue->dql.limit);
#endif
}
static inline void netdev_tx_sent_queue(struct netdev_queue *dev_queue,
					unsigned int bytes)
{
#ifdef CONFIG_BQL
	dql_queued(&dev_queue->dql, bytes);

	if (likely(dql_avail(&dev_queue->dql) >= 0))
		return;

	set_bit(__QUEUE_STATE_STACK_XOFF, &dev_queue->state);

	/*
	 * The XOFF flag must be set before checking the dql_avail below,
	 * because in netdev_tx_completed_queue we update the dql_completed
	 * before checking the XOFF flag.
	 */
	smp_mb();

	/* check again in case another CPU has just made room avail */
	if (unlikely(dql_avail(&dev_queue->dql) >= 0))
		clear_bit(__QUEUE_STATE_STACK_XOFF, &dev_queue->state);
#endif
}
2583 * netdev_sent_queue - report the number of bytes queued to hardware
2584 * @dev: network device
2585 * @bytes: number of bytes queued to the hardware device queue
 *	Report the number of bytes queued for sending/completion to the network
 *	device hardware queue. @bytes should be a good approximation and must
 *	exactly match the total @bytes later passed to netdev_completed_queue().
2591 static inline void netdev_sent_queue(struct net_device *dev, unsigned int bytes)
2593 netdev_tx_sent_queue(netdev_get_tx_queue(dev, 0), bytes);
static inline void netdev_tx_completed_queue(struct netdev_queue *dev_queue,
					     unsigned int pkts, unsigned int bytes)
{
#ifdef CONFIG_BQL
	if (unlikely(!bytes))
		return;

	dql_completed(&dev_queue->dql, bytes);

	/*
	 * Without the memory barrier there is a small possibility that
	 * netdev_tx_sent_queue will miss the update and cause the queue to
	 * be stopped forever
	 */
	smp_mb();

	if (dql_avail(&dev_queue->dql) < 0)
		return;

	if (test_and_clear_bit(__QUEUE_STATE_STACK_XOFF, &dev_queue->state))
		netif_schedule_queue(dev_queue);
#endif
}
2621 * netdev_completed_queue - report bytes and packets completed by device
2622 * @dev: network device
2623 * @pkts: actual number of packets sent over the medium
2624 * @bytes: actual number of bytes sent over the medium
2626 * Report the number of bytes and packets transmitted by the network device
 *	hardware queue over the physical medium; @bytes must exactly match the
 *	@bytes amount passed to netdev_sent_queue().
2630 static inline void netdev_completed_queue(struct net_device *dev,
2631 unsigned int pkts, unsigned int bytes)
2633 netdev_tx_completed_queue(netdev_get_tx_queue(dev, 0), pkts, bytes);
static inline void netdev_tx_reset_queue(struct netdev_queue *q)
{
#ifdef CONFIG_BQL
	clear_bit(__QUEUE_STATE_STACK_XOFF, &q->state);
	dql_reset(&q->dql);
#endif
}
2645 * netdev_reset_queue - reset the packets and bytes count of a network device
2646 * @dev_queue: network device
2648 * Reset the bytes and packet count of a network device and clear the
2649 * software flow control OFF bit for this network device
2651 static inline void netdev_reset_queue(struct net_device *dev_queue)
2653 netdev_tx_reset_queue(netdev_get_tx_queue(dev_queue, 0));
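/*
 * Example (editorial): the three BQL touch points over a driver's lifetime;
 * pkts_done/bytes_done stand for whatever the completion handler counted.
 *
 *	// in ndo_start_xmit(), after posting the skb to the ring:
 *	netdev_sent_queue(dev, skb->len);
 *
 *	// in the TX completion handler:
 *	netdev_completed_queue(dev, pkts_done, bytes_done);
 *
 *	// when the ring is flushed (ndo_stop, reset paths):
 *	netdev_reset_queue(dev);
 */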
2657 * netdev_cap_txqueue - check if selected tx queue exceeds device queues
2658 * @dev: network device
2659 * @queue_index: given tx queue index
2661 * Returns 0 if given tx queue index >= number of device tx queues,
2662 * otherwise returns the originally passed tx queue index.
static inline u16 netdev_cap_txqueue(struct net_device *dev, u16 queue_index)
{
	if (unlikely(queue_index >= dev->real_num_tx_queues)) {
		net_warn_ratelimited("%s selects TX queue %d, but real number of TX queues is %d\n",
				     dev->name, queue_index,
				     dev->real_num_tx_queues);
		return 0;
	}

	return queue_index;
}
2677 * netif_running - test if up
2678 * @dev: network device
2680 * Test if the device has been brought up.
2682 static inline bool netif_running(const struct net_device *dev)
2684 return test_bit(__LINK_STATE_START, &dev->state);
 * Routines to manage the subqueues on a device.  We only need start,
 * stop, and a check if it's stopped.  All other device management is
 * done at the overall netdevice level.
 * Also test the device if we're multiqueue.
2695 * netif_start_subqueue - allow sending packets on subqueue
2696 * @dev: network device
2697 * @queue_index: sub queue index
2699 * Start individual transmit queue of a device with multiple transmit queues.
2701 static inline void netif_start_subqueue(struct net_device *dev, u16 queue_index)
2703 struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);
2705 netif_tx_start_queue(txq);
2709 * netif_stop_subqueue - stop sending packets on subqueue
2710 * @dev: network device
2711 * @queue_index: sub queue index
2713 * Stop individual transmit queue of a device with multiple transmit queues.
2715 static inline void netif_stop_subqueue(struct net_device *dev, u16 queue_index)
2717 struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);
2718 netif_tx_stop_queue(txq);
2722 * netif_subqueue_stopped - test status of subqueue
2723 * @dev: network device
2724 * @queue_index: sub queue index
2726 * Check individual transmit queue of a device with multiple transmit queues.
static inline bool __netif_subqueue_stopped(const struct net_device *dev,
					    u16 queue_index)
{
	struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);

	return netif_tx_queue_stopped(txq);
}
2736 static inline bool netif_subqueue_stopped(const struct net_device *dev,
2737 struct sk_buff *skb)
2739 return __netif_subqueue_stopped(dev, skb_get_queue_mapping(skb));
2742 void netif_wake_subqueue(struct net_device *dev, u16 queue_index);
#ifdef CONFIG_XPS
int netif_set_xps_queue(struct net_device *dev, const struct cpumask *mask,
			u16 index);
#else
static inline int netif_set_xps_queue(struct net_device *dev,
				      const struct cpumask *mask,
				      u16 index)
{
	return 0;
}
#endif
2757 * Returns a Tx hash for the given packet when dev->real_num_tx_queues is used
2758 * as a distribution range limit for the returned value.
2760 static inline u16 skb_tx_hash(const struct net_device *dev,
2761 struct sk_buff *skb)
2763 return __skb_tx_hash(dev, skb, dev->real_num_tx_queues);
2767 * netif_is_multiqueue - test if device has multiple transmit queues
2768 * @dev: network device
2770 * Check if device has multiple transmit queues
2772 static inline bool netif_is_multiqueue(const struct net_device *dev)
2774 return dev->num_tx_queues > 1;
2777 int netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq);
2780 int netif_set_real_num_rx_queues(struct net_device *dev, unsigned int rxq);
static inline int netif_set_real_num_rx_queues(struct net_device *dev,
					       unsigned int rxq)
{
	return 0;
}
2790 static inline unsigned int get_netdev_rx_queue_index(
2791 struct netdev_rx_queue *queue)
2793 struct net_device *dev = queue->dev;
	int index = queue - dev->_rx;

	BUG_ON(index >= dev->num_rx_queues);
	return index;
}
2801 #define DEFAULT_MAX_NUM_RSS_QUEUES (8)
2802 int netif_get_num_default_rss_queues(void);
enum skb_free_reason {
	SKB_REASON_CONSUMED,
	SKB_REASON_DROPPED,
};
2809 void __dev_kfree_skb_irq(struct sk_buff *skb, enum skb_free_reason reason);
2810 void __dev_kfree_skb_any(struct sk_buff *skb, enum skb_free_reason reason);
2813 * It is not allowed to call kfree_skb() or consume_skb() from hardware
2814 * interrupt context or with hardware interrupts being disabled.
2815 * (in_irq() || irqs_disabled())
 * We provide four helpers that can be used in the following contexts:
2819 * dev_kfree_skb_irq(skb) when caller drops a packet from irq context,
2820 * replacing kfree_skb(skb)
2822 * dev_consume_skb_irq(skb) when caller consumes a packet from irq context.
2823 * Typically used in place of consume_skb(skb) in TX completion path
2825 * dev_kfree_skb_any(skb) when caller doesn't know its current irq context,
2826 * replacing kfree_skb(skb)
2828 * dev_consume_skb_any(skb) when caller doesn't know its current irq context,
2829 * and consumed a packet. Used in place of consume_skb(skb)
2831 static inline void dev_kfree_skb_irq(struct sk_buff *skb)
2833 __dev_kfree_skb_irq(skb, SKB_REASON_DROPPED);
2836 static inline void dev_consume_skb_irq(struct sk_buff *skb)
2838 __dev_kfree_skb_irq(skb, SKB_REASON_CONSUMED);
2841 static inline void dev_kfree_skb_any(struct sk_buff *skb)
2843 __dev_kfree_skb_any(skb, SKB_REASON_DROPPED);
2846 static inline void dev_consume_skb_any(struct sk_buff *skb)
2848 __dev_kfree_skb_any(skb, SKB_REASON_CONSUMED);
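/*
 * Example (editorial): picking the right helper in a TX completion routine
 * that may run in hard-IRQ context; "status_ok" is a stand-in for the
 * device's descriptor status.
 *
 *	if (status_ok)
 *		dev_consume_skb_any(skb);	// delivered, not a drop
 *	else
 *		dev_kfree_skb_any(skb);		// error, counts as a drop
 */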
2851 int netif_rx(struct sk_buff *skb);
2852 int netif_rx_ni(struct sk_buff *skb);
2853 int netif_receive_skb(struct sk_buff *skb);
2854 gro_result_t napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb);
2855 void napi_gro_flush(struct napi_struct *napi, bool flush_old);
2856 struct sk_buff *napi_get_frags(struct napi_struct *napi);
2857 gro_result_t napi_gro_frags(struct napi_struct *napi);
2858 struct packet_offload *gro_find_receive_by_type(__be16 type);
2859 struct packet_offload *gro_find_complete_by_type(__be16 type);
static inline void napi_free_frags(struct napi_struct *napi)
{
	kfree_skb(napi->skb);
	napi->skb = NULL;
}
2867 int netdev_rx_handler_register(struct net_device *dev,
2868 rx_handler_func_t *rx_handler,
2869 void *rx_handler_data);
2870 void netdev_rx_handler_unregister(struct net_device *dev);
2872 bool dev_valid_name(const char *name);
2873 int dev_ioctl(struct net *net, unsigned int cmd, void __user *);
2874 int dev_ethtool(struct net *net, struct ifreq *);
2875 unsigned int dev_get_flags(const struct net_device *);
2876 int __dev_change_flags(struct net_device *, unsigned int flags);
2877 int dev_change_flags(struct net_device *, unsigned int);
2878 void __dev_notify_flags(struct net_device *, unsigned int old_flags,
2879 unsigned int gchanges);
2880 int dev_change_name(struct net_device *, const char *);
2881 int dev_set_alias(struct net_device *, const char *, size_t);
2882 int dev_change_net_namespace(struct net_device *, struct net *, const char *);
2883 int dev_set_mtu(struct net_device *, int);
2884 void dev_set_group(struct net_device *, int);
2885 int dev_set_mac_address(struct net_device *, struct sockaddr *);
2886 int dev_change_carrier(struct net_device *, bool new_carrier);
2887 int dev_get_phys_port_id(struct net_device *dev,
2888 struct netdev_phys_item_id *ppid);
2889 struct sk_buff *validate_xmit_skb_list(struct sk_buff *skb, struct net_device *dev);
2890 struct sk_buff *dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
2891 struct netdev_queue *txq, int *ret);
2892 int __dev_forward_skb(struct net_device *dev, struct sk_buff *skb);
2893 int dev_forward_skb(struct net_device *dev, struct sk_buff *skb);
2894 bool is_skb_forwardable(struct net_device *dev, struct sk_buff *skb);
2896 extern int netdev_budget;
2898 /* Called by rtnetlink.c:rtnl_unlock() */
2899 void netdev_run_todo(void);
2902 * dev_put - release reference to device
2903 * @dev: network device
2905 * Release reference to device to allow it to be freed.
2907 static inline void dev_put(struct net_device *dev)
2909 this_cpu_dec(*dev->pcpu_refcnt);
2913 * dev_hold - get reference to device
2914 * @dev: network device
2916 * Hold reference to device to keep it from being freed.
2918 static inline void dev_hold(struct net_device *dev)
2920 this_cpu_inc(*dev->pcpu_refcnt);
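/*
 * Example (editorial): holding the device across asynchronous work so it
 * cannot be freed underneath us; the work item itself is hypothetical.
 *
 *	dev_hold(dev);			// before handing dev to async context
 *	schedule_work(&priv->foo_work);
 *
 *	// and in the work handler, once dev is no longer needed:
 *	dev_put(dev);
 */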
2923 /* Carrier loss detection, dial on demand. The functions netif_carrier_on
 * and _off may be called from IRQ context, but it is the caller
 * who is responsible for serialization of these calls.
2927 * The name carrier is inappropriate, these functions should really be
2928 * called netif_lowerlayer_*() because they represent the state of any
2929 * kind of lower layer not just hardware media.
2932 void linkwatch_init_dev(struct net_device *dev);
2933 void linkwatch_fire_event(struct net_device *dev);
2934 void linkwatch_forget_dev(struct net_device *dev);
2937 * netif_carrier_ok - test if carrier present
2938 * @dev: network device
2940 * Check if carrier is present on device
2942 static inline bool netif_carrier_ok(const struct net_device *dev)
2944 return !test_bit(__LINK_STATE_NOCARRIER, &dev->state);
2947 unsigned long dev_trans_start(struct net_device *dev);
2949 void __netdev_watchdog_up(struct net_device *dev);
2951 void netif_carrier_on(struct net_device *dev);
2953 void netif_carrier_off(struct net_device *dev);
2956 * netif_dormant_on - mark device as dormant.
2957 * @dev: network device
2959 * Mark device as dormant (as per RFC2863).
2961 * The dormant state indicates that the relevant interface is not
2962 * actually in a condition to pass packets (i.e., it is not 'up') but is
2963 * in a "pending" state, waiting for some external event. For "on-
2964 * demand" interfaces, this new state identifies the situation where the
2965 * interface is waiting for events to place it in the up state.
2968 static inline void netif_dormant_on(struct net_device *dev)
2970 if (!test_and_set_bit(__LINK_STATE_DORMANT, &dev->state))
2971 linkwatch_fire_event(dev);
2975 * netif_dormant_off - set device as not dormant.
2976 * @dev: network device
2978 * Device is not in dormant state.
2980 static inline void netif_dormant_off(struct net_device *dev)
2982 if (test_and_clear_bit(__LINK_STATE_DORMANT, &dev->state))
2983 linkwatch_fire_event(dev);
 *	netif_dormant - test if device is dormant
 *	@dev: network device
 *
 *	Check if the device is dormant.
2992 static inline bool netif_dormant(const struct net_device *dev)
2994 return test_bit(__LINK_STATE_DORMANT, &dev->state);
2999 * netif_oper_up - test if device is operational
3000 * @dev: network device
3002 * Check if carrier is operational
3004 static inline bool netif_oper_up(const struct net_device *dev)
3006 return (dev->operstate == IF_OPER_UP ||
3007 dev->operstate == IF_OPER_UNKNOWN /* backward compat */);
3011 * netif_device_present - is device available or removed
3012 * @dev: network device
3014 * Check if device has not been removed from system.
3016 static inline bool netif_device_present(struct net_device *dev)
3018 return test_bit(__LINK_STATE_PRESENT, &dev->state);
3021 void netif_device_detach(struct net_device *dev);
3023 void netif_device_attach(struct net_device *dev);
3026 * Network interface message level settings
3030 NETIF_MSG_DRV = 0x0001,
3031 NETIF_MSG_PROBE = 0x0002,
3032 NETIF_MSG_LINK = 0x0004,
3033 NETIF_MSG_TIMER = 0x0008,
3034 NETIF_MSG_IFDOWN = 0x0010,
3035 NETIF_MSG_IFUP = 0x0020,
3036 NETIF_MSG_RX_ERR = 0x0040,
3037 NETIF_MSG_TX_ERR = 0x0080,
3038 NETIF_MSG_TX_QUEUED = 0x0100,
3039 NETIF_MSG_INTR = 0x0200,
3040 NETIF_MSG_TX_DONE = 0x0400,
3041 NETIF_MSG_RX_STATUS = 0x0800,
3042 NETIF_MSG_PKTDATA = 0x1000,
3043 NETIF_MSG_HW = 0x2000,
3044 NETIF_MSG_WOL = 0x4000,
3047 #define netif_msg_drv(p) ((p)->msg_enable & NETIF_MSG_DRV)
3048 #define netif_msg_probe(p) ((p)->msg_enable & NETIF_MSG_PROBE)
3049 #define netif_msg_link(p) ((p)->msg_enable & NETIF_MSG_LINK)
3050 #define netif_msg_timer(p) ((p)->msg_enable & NETIF_MSG_TIMER)
3051 #define netif_msg_ifdown(p) ((p)->msg_enable & NETIF_MSG_IFDOWN)
3052 #define netif_msg_ifup(p) ((p)->msg_enable & NETIF_MSG_IFUP)
3053 #define netif_msg_rx_err(p) ((p)->msg_enable & NETIF_MSG_RX_ERR)
3054 #define netif_msg_tx_err(p) ((p)->msg_enable & NETIF_MSG_TX_ERR)
3055 #define netif_msg_tx_queued(p) ((p)->msg_enable & NETIF_MSG_TX_QUEUED)
3056 #define netif_msg_intr(p) ((p)->msg_enable & NETIF_MSG_INTR)
3057 #define netif_msg_tx_done(p) ((p)->msg_enable & NETIF_MSG_TX_DONE)
3058 #define netif_msg_rx_status(p) ((p)->msg_enable & NETIF_MSG_RX_STATUS)
3059 #define netif_msg_pktdata(p) ((p)->msg_enable & NETIF_MSG_PKTDATA)
3060 #define netif_msg_hw(p) ((p)->msg_enable & NETIF_MSG_HW)
3061 #define netif_msg_wol(p) ((p)->msg_enable & NETIF_MSG_WOL)
static inline u32 netif_msg_init(int debug_value, int default_msg_enable_bits)
{
	/* use default */
	if (debug_value < 0 || debug_value >= (sizeof(u32) * 8))
		return default_msg_enable_bits;
	if (debug_value == 0)	/* no output */
		return 0;
	/* set low N bits */
	return (1 << debug_value) - 1;
}
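/*
 * Example (editorial): the usual way drivers wire this up; "debug" is the
 * conventional module parameter, -1 meaning "use the driver defaults".
 *
 *	static int debug = -1;
 *	module_param(debug, int, 0);
 *
 *	// in probe:
 *	priv->msg_enable = netif_msg_init(debug,
 *			NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK);
 *
 *	// later, gated logging:
 *	if (netif_msg_link(priv))
 *		netdev_info(dev, "link is up\n");
 */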
3074 static inline void __netif_tx_lock(struct netdev_queue *txq, int cpu)
3076 spin_lock(&txq->_xmit_lock);
3077 txq->xmit_lock_owner = cpu;
3080 static inline void __netif_tx_lock_bh(struct netdev_queue *txq)
3082 spin_lock_bh(&txq->_xmit_lock);
3083 txq->xmit_lock_owner = smp_processor_id();
static inline bool __netif_tx_trylock(struct netdev_queue *txq)
{
	bool ok = spin_trylock(&txq->_xmit_lock);
	if (ok)
		txq->xmit_lock_owner = smp_processor_id();
	return ok;
}
3094 static inline void __netif_tx_unlock(struct netdev_queue *txq)
3096 txq->xmit_lock_owner = -1;
3097 spin_unlock(&txq->_xmit_lock);
3100 static inline void __netif_tx_unlock_bh(struct netdev_queue *txq)
3102 txq->xmit_lock_owner = -1;
3103 spin_unlock_bh(&txq->_xmit_lock);
3106 static inline void txq_trans_update(struct netdev_queue *txq)
3108 if (txq->xmit_lock_owner != -1)
3109 txq->trans_start = jiffies;
3113 * netif_tx_lock - grab network device transmit lock
3114 * @dev: network device
3116 * Get network device transmit lock
3118 static inline void netif_tx_lock(struct net_device *dev)
3123 spin_lock(&dev->tx_global_lock);
3124 cpu = smp_processor_id();
3125 for (i = 0; i < dev->num_tx_queues; i++) {
3126 struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
3128 /* We are the only thread of execution doing a
3129 * freeze, but we have to grab the _xmit_lock in
3130 * order to synchronize with threads which are in
3131 * the ->hard_start_xmit() handler and already
3132 * checked the frozen bit.
3134 __netif_tx_lock(txq, cpu);
3135 set_bit(__QUEUE_STATE_FROZEN, &txq->state);
3136 __netif_tx_unlock(txq);
static inline void netif_tx_lock_bh(struct net_device *dev)
{
	local_bh_disable();
	netif_tx_lock(dev);
}
3146 static inline void netif_tx_unlock(struct net_device *dev)
3150 for (i = 0; i < dev->num_tx_queues; i++) {
3151 struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
		/* No need to grab the _xmit_lock here.  If the
		 * queue is not stopped for another reason, we
		 * force a schedule.
		 */
3158 netif_schedule_queue(txq);
3160 spin_unlock(&dev->tx_global_lock);
static inline void netif_tx_unlock_bh(struct net_device *dev)
{
	netif_tx_unlock(dev);
	local_bh_enable();
}
3169 #define HARD_TX_LOCK(dev, txq, cpu) { \
3170 if ((dev->features & NETIF_F_LLTX) == 0) { \
3171 __netif_tx_lock(txq, cpu); \
#define HARD_TX_TRYLOCK(dev, txq)			\
	(((dev->features & NETIF_F_LLTX) == 0) ?	\
		__netif_tx_trylock(txq) :		\
		true )
3180 #define HARD_TX_UNLOCK(dev, txq) { \
3181 if ((dev->features & NETIF_F_LLTX) == 0) { \
3182 __netif_tx_unlock(txq); \
static inline void netif_tx_disable(struct net_device *dev)
{
	unsigned int i;
	int cpu;

	local_bh_disable();
	cpu = smp_processor_id();
	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);

		__netif_tx_lock(txq, cpu);
		netif_tx_stop_queue(txq);
		__netif_tx_unlock(txq);
	}
	local_bh_enable();
}
3203 static inline void netif_addr_lock(struct net_device *dev)
3205 spin_lock(&dev->addr_list_lock);
3208 static inline void netif_addr_lock_nested(struct net_device *dev)
3210 int subclass = SINGLE_DEPTH_NESTING;
3212 if (dev->netdev_ops->ndo_get_lock_subclass)
3213 subclass = dev->netdev_ops->ndo_get_lock_subclass(dev);
3215 spin_lock_nested(&dev->addr_list_lock, subclass);
3218 static inline void netif_addr_lock_bh(struct net_device *dev)
3220 spin_lock_bh(&dev->addr_list_lock);
3223 static inline void netif_addr_unlock(struct net_device *dev)
3225 spin_unlock(&dev->addr_list_lock);
3228 static inline void netif_addr_unlock_bh(struct net_device *dev)
3230 spin_unlock_bh(&dev->addr_list_lock);
3234 * dev_addrs walker. Should be used only for read access. Call with
3235 * rcu_read_lock held.
3237 #define for_each_dev_addr(dev, ha) \
3238 list_for_each_entry_rcu(ha, &dev->dev_addrs.list, list)
3240 /* These functions live elsewhere (drivers/net/net_init.c, but related) */
3242 void ether_setup(struct net_device *dev);
3244 /* Support for loadable net-drivers */
3245 struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
3246 unsigned char name_assign_type,
3247 void (*setup)(struct net_device *),
3248 unsigned int txqs, unsigned int rxqs);
3249 #define alloc_netdev(sizeof_priv, name, name_assign_type, setup) \
3250 alloc_netdev_mqs(sizeof_priv, name, name_assign_type, setup, 1, 1)
#define alloc_netdev_mq(sizeof_priv, name, name_assign_type, setup, count) \
	alloc_netdev_mqs(sizeof_priv, name, name_assign_type, setup, count, \
			 count)
3256 int register_netdev(struct net_device *dev);
3257 void unregister_netdev(struct net_device *dev);
3259 /* General hardware address lists handling functions */
3260 int __hw_addr_sync(struct netdev_hw_addr_list *to_list,
3261 struct netdev_hw_addr_list *from_list, int addr_len);
3262 void __hw_addr_unsync(struct netdev_hw_addr_list *to_list,
3263 struct netdev_hw_addr_list *from_list, int addr_len);
3264 int __hw_addr_sync_dev(struct netdev_hw_addr_list *list,
3265 struct net_device *dev,
3266 int (*sync)(struct net_device *, const unsigned char *),
3267 int (*unsync)(struct net_device *,
3268 const unsigned char *));
3269 void __hw_addr_unsync_dev(struct netdev_hw_addr_list *list,
3270 struct net_device *dev,
3271 int (*unsync)(struct net_device *,
3272 const unsigned char *));
3273 void __hw_addr_init(struct netdev_hw_addr_list *list);
3275 /* Functions used for device addresses handling */
3276 int dev_addr_add(struct net_device *dev, const unsigned char *addr,
3277 unsigned char addr_type);
3278 int dev_addr_del(struct net_device *dev, const unsigned char *addr,
3279 unsigned char addr_type);
3280 void dev_addr_flush(struct net_device *dev);
3281 int dev_addr_init(struct net_device *dev);
3283 /* Functions used for unicast addresses handling */
3284 int dev_uc_add(struct net_device *dev, const unsigned char *addr);
3285 int dev_uc_add_excl(struct net_device *dev, const unsigned char *addr);
3286 int dev_uc_del(struct net_device *dev, const unsigned char *addr);
3287 int dev_uc_sync(struct net_device *to, struct net_device *from);
3288 int dev_uc_sync_multiple(struct net_device *to, struct net_device *from);
3289 void dev_uc_unsync(struct net_device *to, struct net_device *from);
3290 void dev_uc_flush(struct net_device *dev);
3291 void dev_uc_init(struct net_device *dev);
 *  __dev_uc_sync - Synchronize device's unicast list
3295 * @dev: device to sync
3296 * @sync: function to call if address should be added
3297 * @unsync: function to call if address should be removed
3299 * Add newly added addresses to the interface, and release
3300 * addresses that have been deleted.
3302 static inline int __dev_uc_sync(struct net_device *dev,
3303 int (*sync)(struct net_device *,
3304 const unsigned char *),
3305 int (*unsync)(struct net_device *,
3306 const unsigned char *))
3308 return __hw_addr_sync_dev(&dev->uc, dev, sync, unsync);
3312 * __dev_uc_unsync - Remove synchronized addresses from device
3313 * @dev: device to sync
3314 * @unsync: function to call if address should be removed
3316 * Remove all addresses that were added to the device by dev_uc_sync().
3318 static inline void __dev_uc_unsync(struct net_device *dev,
3319 int (*unsync)(struct net_device *,
3320 const unsigned char *))
3322 __hw_addr_unsync_dev(&dev->uc, dev, unsync);
3325 /* Functions used for multicast addresses handling */
3326 int dev_mc_add(struct net_device *dev, const unsigned char *addr);
3327 int dev_mc_add_global(struct net_device *dev, const unsigned char *addr);
3328 int dev_mc_add_excl(struct net_device *dev, const unsigned char *addr);
3329 int dev_mc_del(struct net_device *dev, const unsigned char *addr);
3330 int dev_mc_del_global(struct net_device *dev, const unsigned char *addr);
3331 int dev_mc_sync(struct net_device *to, struct net_device *from);
3332 int dev_mc_sync_multiple(struct net_device *to, struct net_device *from);
3333 void dev_mc_unsync(struct net_device *to, struct net_device *from);
3334 void dev_mc_flush(struct net_device *dev);
3335 void dev_mc_init(struct net_device *dev);
 *  __dev_mc_sync - Synchronize device's multicast list
3339 * @dev: device to sync
3340 * @sync: function to call if address should be added
3341 * @unsync: function to call if address should be removed
3343 * Add newly added addresses to the interface, and release
3344 * addresses that have been deleted.
3346 static inline int __dev_mc_sync(struct net_device *dev,
3347 int (*sync)(struct net_device *,
3348 const unsigned char *),
3349 int (*unsync)(struct net_device *,
3350 const unsigned char *))
3352 return __hw_addr_sync_dev(&dev->mc, dev, sync, unsync);
3356 * __dev_mc_unsync - Remove synchronized addresses from device
3357 * @dev: device to sync
3358 * @unsync: function to call if address should be removed
3360 * Remove all addresses that were added to the device by dev_mc_sync().
3362 static inline void __dev_mc_unsync(struct net_device *dev,
3363 int (*unsync)(struct net_device *,
3364 const unsigned char *))
3366 __hw_addr_unsync_dev(&dev->mc, dev, unsync);
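/*
 * Example (editorial): an ndo_set_rx_mode() built on the sync helpers above;
 * foo_hw_add_filter()/foo_hw_del_filter() are hypothetical hardware hooks.
 *
 *	static int foo_sync_addr(struct net_device *dev, const unsigned char *addr)
 *	{
 *		return foo_hw_add_filter(netdev_priv(dev), addr);
 *	}
 *
 *	static int foo_unsync_addr(struct net_device *dev, const unsigned char *addr)
 *	{
 *		return foo_hw_del_filter(netdev_priv(dev), addr);
 *	}
 *
 *	static void foo_set_rx_mode(struct net_device *dev)
 *	{
 *		__dev_uc_sync(dev, foo_sync_addr, foo_unsync_addr);
 *		__dev_mc_sync(dev, foo_sync_addr, foo_unsync_addr);
 *	}
 */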
3369 /* Functions used for secondary unicast and multicast support */
3370 void dev_set_rx_mode(struct net_device *dev);
3371 void __dev_set_rx_mode(struct net_device *dev);
3372 int dev_set_promiscuity(struct net_device *dev, int inc);
3373 int dev_set_allmulti(struct net_device *dev, int inc);
3374 void netdev_state_change(struct net_device *dev);
3375 void netdev_notify_peers(struct net_device *dev);
3376 void netdev_features_change(struct net_device *dev);
3377 /* Load a device via the kmod */
3378 void dev_load(struct net *net, const char *name);
3379 struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
3380 struct rtnl_link_stats64 *storage);
3381 void netdev_stats_to_stats64(struct rtnl_link_stats64 *stats64,
3382 const struct net_device_stats *netdev_stats);
3384 extern int netdev_max_backlog;
3385 extern int netdev_tstamp_prequeue;
3386 extern int weight_p;
3387 extern int bpf_jit_enable;
3389 bool netdev_has_upper_dev(struct net_device *dev, struct net_device *upper_dev);
3390 struct net_device *netdev_upper_get_next_dev_rcu(struct net_device *dev,
3391 struct list_head **iter);
3392 struct net_device *netdev_all_upper_get_next_dev_rcu(struct net_device *dev,
3393 struct list_head **iter);
3395 /* iterate through upper list, must be called under RCU read lock */
3396 #define netdev_for_each_upper_dev_rcu(dev, updev, iter) \
3397 for (iter = &(dev)->adj_list.upper, \
3398 updev = netdev_upper_get_next_dev_rcu(dev, &(iter)); \
3400 updev = netdev_upper_get_next_dev_rcu(dev, &(iter)))
3402 /* iterate through upper list, must be called under RCU read lock */
3403 #define netdev_for_each_all_upper_dev_rcu(dev, updev, iter) \
3404 for (iter = &(dev)->all_adj_list.upper, \
3405 updev = netdev_all_upper_get_next_dev_rcu(dev, &(iter)); \
3407 updev = netdev_all_upper_get_next_dev_rcu(dev, &(iter)))
3409 void *netdev_lower_get_next_private(struct net_device *dev,
3410 struct list_head **iter);
3411 void *netdev_lower_get_next_private_rcu(struct net_device *dev,
3412 struct list_head **iter);
3414 #define netdev_for_each_lower_private(dev, priv, iter) \
3415 for (iter = (dev)->adj_list.lower.next, \
3416 priv = netdev_lower_get_next_private(dev, &(iter)); \
3418 priv = netdev_lower_get_next_private(dev, &(iter)))
3420 #define netdev_for_each_lower_private_rcu(dev, priv, iter) \
3421 for (iter = &(dev)->adj_list.lower, \
3422 priv = netdev_lower_get_next_private_rcu(dev, &(iter)); \
3424 priv = netdev_lower_get_next_private_rcu(dev, &(iter)))
3426 void *netdev_lower_get_next(struct net_device *dev,
3427 struct list_head **iter);
3428 #define netdev_for_each_lower_dev(dev, ldev, iter) \
3429 for (iter = &(dev)->adj_list.lower, \
3430 ldev = netdev_lower_get_next(dev, &(iter)); \
3432 ldev = netdev_lower_get_next(dev, &(iter)))
3434 void *netdev_adjacent_get_private(struct list_head *adj_list);
3435 void *netdev_lower_get_first_private_rcu(struct net_device *dev);
3436 struct net_device *netdev_master_upper_dev_get(struct net_device *dev);
3437 struct net_device *netdev_master_upper_dev_get_rcu(struct net_device *dev);
3438 int netdev_upper_dev_link(struct net_device *dev, struct net_device *upper_dev);
3439 int netdev_master_upper_dev_link(struct net_device *dev,
3440 struct net_device *upper_dev);
3441 int netdev_master_upper_dev_link_private(struct net_device *dev,
3442 struct net_device *upper_dev,
3444 void netdev_upper_dev_unlink(struct net_device *dev,
3445 struct net_device *upper_dev);
3446 void netdev_adjacent_rename_links(struct net_device *dev, char *oldname);
3447 void *netdev_lower_dev_get_private(struct net_device *dev,
3448 struct net_device *lower_dev);
3450 /* RSS keys are 40 or 52 bytes long */
3451 #define NETDEV_RSS_KEY_LEN 52
3452 extern u8 netdev_rss_key[NETDEV_RSS_KEY_LEN];
3453 void netdev_rss_key_fill(void *buffer, size_t len);
3455 int dev_get_nest_level(struct net_device *dev,
3456 bool (*type_check)(struct net_device *dev));
3457 int skb_checksum_help(struct sk_buff *skb);
3458 struct sk_buff *__skb_gso_segment(struct sk_buff *skb,
3459 netdev_features_t features, bool tx_path);
3460 struct sk_buff *skb_mac_gso_segment(struct sk_buff *skb,
3461 netdev_features_t features);
static inline
struct sk_buff *skb_gso_segment(struct sk_buff *skb, netdev_features_t features)
{
	return __skb_gso_segment(skb, features, true);
}
3468 __be16 skb_network_protocol(struct sk_buff *skb, int *depth);
3470 static inline bool can_checksum_protocol(netdev_features_t features,
3473 return ((features & NETIF_F_GEN_CSUM) ||
3474 ((features & NETIF_F_V4_CSUM) &&
3475 protocol == htons(ETH_P_IP)) ||
3476 ((features & NETIF_F_V6_CSUM) &&
3477 protocol == htons(ETH_P_IPV6)) ||
3478 ((features & NETIF_F_FCOE_CRC) &&
3479 protocol == htons(ETH_P_FCOE)));
#ifdef CONFIG_BUG
void netdev_rx_csum_fault(struct net_device *dev);
#else
static inline void netdev_rx_csum_fault(struct net_device *dev)
{
}
#endif
3489 /* rx skb timestamps */
3490 void net_enable_timestamp(void);
3491 void net_disable_timestamp(void);
#ifdef CONFIG_PROC_FS
int __init dev_proc_init(void);
#else
#define dev_proc_init() 0
#endif
3499 static inline netdev_tx_t __netdev_start_xmit(const struct net_device_ops *ops,
3500 struct sk_buff *skb, struct net_device *dev,
3503 skb->xmit_more = more ? 1 : 0;
3504 return ops->ndo_start_xmit(skb, dev);
static inline netdev_tx_t netdev_start_xmit(struct sk_buff *skb, struct net_device *dev,
					    struct netdev_queue *txq, bool more)
{
	const struct net_device_ops *ops = dev->netdev_ops;
	int rc;

	rc = __netdev_start_xmit(ops, skb, dev, more);
	if (rc == NETDEV_TX_OK)
		txq_trans_update(txq);

	return rc;
}
3520 int netdev_class_create_file_ns(struct class_attribute *class_attr,
3522 void netdev_class_remove_file_ns(struct class_attribute *class_attr,
3525 static inline int netdev_class_create_file(struct class_attribute *class_attr)
3527 return netdev_class_create_file_ns(class_attr, NULL);
3530 static inline void netdev_class_remove_file(struct class_attribute *class_attr)
3532 netdev_class_remove_file_ns(class_attr, NULL);
3535 extern struct kobj_ns_type_operations net_ns_type_operations;
3537 const char *netdev_drivername(const struct net_device *dev);
3539 void linkwatch_run_queue(void);
static inline netdev_features_t netdev_intersect_features(netdev_features_t f1,
							   netdev_features_t f2)
{
	if (f1 & NETIF_F_GEN_CSUM)
		f1 |= (NETIF_F_ALL_CSUM & ~NETIF_F_GEN_CSUM);
	if (f2 & NETIF_F_GEN_CSUM)
		f2 |= (NETIF_F_ALL_CSUM & ~NETIF_F_GEN_CSUM);
	f1 &= f2;
	if (f1 & NETIF_F_GEN_CSUM)
		f1 &= ~(NETIF_F_ALL_CSUM & ~NETIF_F_GEN_CSUM);

	return f1;
}
3555 static inline netdev_features_t netdev_get_wanted_features(
3556 struct net_device *dev)
3558 return (dev->features & ~dev->hw_features) | dev->wanted_features;
3560 netdev_features_t netdev_increment_features(netdev_features_t all,
3561 netdev_features_t one, netdev_features_t mask);
/* Allow TSO to be used on stacked devices:
 * performing the GSO segmentation before the last device
 * is a performance improvement.
 */
3567 static inline netdev_features_t netdev_add_tso_features(netdev_features_t features,
3568 netdev_features_t mask)
3570 return netdev_increment_features(features, NETIF_F_ALL_TSO, mask);
3573 int __netdev_update_features(struct net_device *dev);
3574 void netdev_update_features(struct net_device *dev);
3575 void netdev_change_features(struct net_device *dev);
3577 void netif_stacked_transfer_operstate(const struct net_device *rootdev,
3578 struct net_device *dev);
3580 netdev_features_t netif_skb_features(struct sk_buff *skb);
3582 static inline bool net_gso_ok(netdev_features_t features, int gso_type)
3584 netdev_features_t feature = gso_type << NETIF_F_GSO_SHIFT;
3586 /* check flags correspondence */
3587 BUILD_BUG_ON(SKB_GSO_TCPV4 != (NETIF_F_TSO >> NETIF_F_GSO_SHIFT));
3588 BUILD_BUG_ON(SKB_GSO_UDP != (NETIF_F_UFO >> NETIF_F_GSO_SHIFT));
3589 BUILD_BUG_ON(SKB_GSO_DODGY != (NETIF_F_GSO_ROBUST >> NETIF_F_GSO_SHIFT));
3590 BUILD_BUG_ON(SKB_GSO_TCP_ECN != (NETIF_F_TSO_ECN >> NETIF_F_GSO_SHIFT));
3591 BUILD_BUG_ON(SKB_GSO_TCPV6 != (NETIF_F_TSO6 >> NETIF_F_GSO_SHIFT));
3592 BUILD_BUG_ON(SKB_GSO_FCOE != (NETIF_F_FSO >> NETIF_F_GSO_SHIFT));
3593 BUILD_BUG_ON(SKB_GSO_GRE != (NETIF_F_GSO_GRE >> NETIF_F_GSO_SHIFT));
3594 BUILD_BUG_ON(SKB_GSO_GRE_CSUM != (NETIF_F_GSO_GRE_CSUM >> NETIF_F_GSO_SHIFT));
3595 BUILD_BUG_ON(SKB_GSO_IPIP != (NETIF_F_GSO_IPIP >> NETIF_F_GSO_SHIFT));
3596 BUILD_BUG_ON(SKB_GSO_SIT != (NETIF_F_GSO_SIT >> NETIF_F_GSO_SHIFT));
3597 BUILD_BUG_ON(SKB_GSO_UDP_TUNNEL != (NETIF_F_GSO_UDP_TUNNEL >> NETIF_F_GSO_SHIFT));
3598 BUILD_BUG_ON(SKB_GSO_UDP_TUNNEL_CSUM != (NETIF_F_GSO_UDP_TUNNEL_CSUM >> NETIF_F_GSO_SHIFT));
3599 BUILD_BUG_ON(SKB_GSO_TUNNEL_REMCSUM != (NETIF_F_GSO_TUNNEL_REMCSUM >> NETIF_F_GSO_SHIFT));
3601 return (features & feature) == feature;
3604 static inline bool skb_gso_ok(struct sk_buff *skb, netdev_features_t features)
3606 return net_gso_ok(features, skb_shinfo(skb)->gso_type) &&
3607 (!skb_has_frag_list(skb) || (features & NETIF_F_FRAGLIST));
3610 static inline bool netif_needs_gso(struct net_device *dev, struct sk_buff *skb,
3611 netdev_features_t features)
3613 return skb_is_gso(skb) && (!skb_gso_ok(skb, features) ||
3614 (dev->netdev_ops->ndo_gso_check &&
3615 !dev->netdev_ops->ndo_gso_check(skb, dev)) ||
3616 unlikely((skb->ip_summed != CHECKSUM_PARTIAL) &&
3617 (skb->ip_summed != CHECKSUM_UNNECESSARY)));
static inline void netif_set_gso_max_size(struct net_device *dev,
					  unsigned int size)
{
	dev->gso_max_size = size;
}
static inline void skb_gso_error_unwind(struct sk_buff *skb, __be16 protocol,
					int pulled_hlen, u16 mac_offset,
					int mac_len)
{
	skb->protocol = protocol;
3631 skb->encapsulation = 1;
3632 skb_push(skb, pulled_hlen);
3633 skb_reset_transport_header(skb);
3634 skb->mac_header = mac_offset;
3635 skb->network_header = skb->mac_header + mac_len;
3636 skb->mac_len = mac_len;
3639 static inline bool netif_is_macvlan(struct net_device *dev)
3641 return dev->priv_flags & IFF_MACVLAN;
3644 static inline bool netif_is_macvlan_port(struct net_device *dev)
3646 return dev->priv_flags & IFF_MACVLAN_PORT;
3649 static inline bool netif_is_ipvlan(struct net_device *dev)
3651 return dev->priv_flags & IFF_IPVLAN_SLAVE;
3654 static inline bool netif_is_ipvlan_port(struct net_device *dev)
3656 return dev->priv_flags & IFF_IPVLAN_MASTER;
3659 static inline bool netif_is_bond_master(struct net_device *dev)
3661 return dev->flags & IFF_MASTER && dev->priv_flags & IFF_BONDING;
3664 static inline bool netif_is_bond_slave(struct net_device *dev)
3666 return dev->flags & IFF_SLAVE && dev->priv_flags & IFF_BONDING;
3669 static inline bool netif_supports_nofcs(struct net_device *dev)
3671 return dev->priv_flags & IFF_SUPP_NOFCS;
3674 /* This device needs to keep skb dst for qdisc enqueue or ndo_start_xmit() */
3675 static inline void netif_keep_dst(struct net_device *dev)
3677 dev->priv_flags &= ~(IFF_XMIT_DST_RELEASE | IFF_XMIT_DST_RELEASE_PERM);
3680 extern struct pernet_operations __net_initdata loopback_net_ops;
3682 /* Logging, debugging and troubleshooting/diagnostic helpers. */
3684 /* netdev_printk helpers, similar to dev_printk */
static inline const char *netdev_name(const struct net_device *dev)
{
	if (!dev->name[0] || strchr(dev->name, '%'))
		return "(unnamed net_device)";
	return dev->name;
}
3693 static inline const char *netdev_reg_state(const struct net_device *dev)
3695 switch (dev->reg_state) {
3696 case NETREG_UNINITIALIZED: return " (uninitialized)";
3697 case NETREG_REGISTERED: return "";
3698 case NETREG_UNREGISTERING: return " (unregistering)";
3699 case NETREG_UNREGISTERED: return " (unregistered)";
3700 case NETREG_RELEASED: return " (released)";
3701 case NETREG_DUMMY: return " (dummy)";
3704 WARN_ONCE(1, "%s: unknown reg_state %d\n", dev->name, dev->reg_state);
3705 return " (unknown)";
3709 void netdev_printk(const char *level, const struct net_device *dev,
3710 const char *format, ...);
3712 void netdev_emerg(const struct net_device *dev, const char *format, ...);
3714 void netdev_alert(const struct net_device *dev, const char *format, ...);
3716 void netdev_crit(const struct net_device *dev, const char *format, ...);
3718 void netdev_err(const struct net_device *dev, const char *format, ...);
3720 void netdev_warn(const struct net_device *dev, const char *format, ...);
3722 void netdev_notice(const struct net_device *dev, const char *format, ...);
3724 void netdev_info(const struct net_device *dev, const char *format, ...);
3726 #define MODULE_ALIAS_NETDEV(device) \
3727 MODULE_ALIAS("netdev-" device)
#if defined(CONFIG_DYNAMIC_DEBUG)
#define netdev_dbg(__dev, format, args...)			\
do {								\
	dynamic_netdev_dbg(__dev, format, ##args);		\
} while (0)
#elif defined(DEBUG)
#define netdev_dbg(__dev, format, args...)			\
	netdev_printk(KERN_DEBUG, __dev, format, ##args)
#else
#define netdev_dbg(__dev, format, args...)			\
({								\
	if (0)							\
		netdev_printk(KERN_DEBUG, __dev, format, ##args); \
	0;							\
})
#endif
#if defined(VERBOSE_DEBUG)
#define netdev_vdbg	netdev_dbg
#else
#define netdev_vdbg(dev, format, args...)			\
({								\
	if (0)							\
		netdev_printk(KERN_DEBUG, dev, format, ##args);	\
	0;							\
})
#endif
3758 * netdev_WARN() acts like dev_printk(), but with the key difference
3759 * of using a WARN/WARN_ON to get the message out, including the
3760 * file/line information and a backtrace.
3762 #define netdev_WARN(dev, format, args...) \
3763 WARN(1, "netdevice: %s%s\n" format, netdev_name(dev), \
3764 netdev_reg_state(dev), ##args)
3766 /* netif printk helpers, similar to netdev_printk */
#define netif_printk(priv, type, level, dev, fmt, args...)	\
do {								\
	if (netif_msg_##type(priv))				\
		netdev_printk(level, (dev), fmt, ##args);	\
} while (0)

#define netif_level(level, priv, type, dev, fmt, args...)	\
do {								\
	if (netif_msg_##type(priv))				\
		netdev_##level(dev, fmt, ##args);		\
} while (0)
3780 #define netif_emerg(priv, type, dev, fmt, args...) \
3781 netif_level(emerg, priv, type, dev, fmt, ##args)
3782 #define netif_alert(priv, type, dev, fmt, args...) \
3783 netif_level(alert, priv, type, dev, fmt, ##args)
3784 #define netif_crit(priv, type, dev, fmt, args...) \
3785 netif_level(crit, priv, type, dev, fmt, ##args)
3786 #define netif_err(priv, type, dev, fmt, args...) \
3787 netif_level(err, priv, type, dev, fmt, ##args)
3788 #define netif_warn(priv, type, dev, fmt, args...) \
3789 netif_level(warn, priv, type, dev, fmt, ##args)
3790 #define netif_notice(priv, type, dev, fmt, args...) \
3791 netif_level(notice, priv, type, dev, fmt, ##args)
3792 #define netif_info(priv, type, dev, fmt, args...) \
3793 netif_level(info, priv, type, dev, fmt, ##args)
#if defined(CONFIG_DYNAMIC_DEBUG)
#define netif_dbg(priv, type, netdev, format, args...)		\
do {								\
	if (netif_msg_##type(priv))				\
		dynamic_netdev_dbg(netdev, format, ##args);	\
} while (0)
#elif defined(DEBUG)
#define netif_dbg(priv, type, dev, format, args...)		\
	netif_printk(priv, type, KERN_DEBUG, dev, format, ##args)
#else
#define netif_dbg(priv, type, dev, format, args...)		\
({								\
	if (0)							\
		netif_printk(priv, type, KERN_DEBUG, dev, format, ##args); \
	0;							\
})
#endif
#if defined(VERBOSE_DEBUG)
#define netif_vdbg	netif_dbg
#else
#define netif_vdbg(priv, type, dev, format, args...)		\
({								\
	if (0)							\
		netif_printk(priv, type, KERN_DEBUG, dev, format, ##args); \
	0;							\
})
#endif
3825 * The list of packet types we will receive (as opposed to discard)
3826 * and the routines to invoke.
3828 * Why 16. Because with 16 the only overlap we get on a hash of the
3829 * low nibble of the protocol value is RARP/SNAP/X.25.
3831 * NOTE: That is no longer true with the addition of VLAN tags. Not
3832 * sure which should go first, but I bet it won't make much
3833 * difference if we are running VLANs. The good news is that
3834 * this protocol won't be in the list unless compiled in, so
3835 * the average user (w/out VLANs) will not be adversely affected.
3851 #define PTYPE_HASH_SIZE (16)
3852 #define PTYPE_HASH_MASK (PTYPE_HASH_SIZE - 1)
3854 #endif /* _LINUX_NETDEVICE_H */