busy_read
----------------
-Low latency busy poll timeout for socket reads. (needs CONFIG_NET_LL_RX_POLL)
+Low latency busy poll timeout for socket reads. (needs CONFIG_NET_RX_BUSY_POLL)
Approximate time in us to busy loop waiting for packets on the device queue.
This sets the default value of the SO_BUSY_POLL socket option.
Can be set or overridden per socket by setting socket option SO_BUSY_POLL,
which is the preferred method of enabling.
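For reference, a minimal userspace sketch of the per-socket override described
above. The fallback SO_BUSY_POLL define is an assumption for older userspace
headers (46 is the asm-generic value; some architectures differ), and the
kernel must be built with CONFIG_NET_RX_BUSY_POLL:

#include <stdio.h>
#include <sys/socket.h>
#include <netinet/in.h>

#ifndef SO_BUSY_POLL
#define SO_BUSY_POLL 46        /* assumed fallback for older headers */
#endif

int main(void)
{
        int fd = socket(AF_INET, SOCK_DGRAM, 0);
        unsigned int usecs = 50;        /* busy-poll budget in microseconds */

        if (fd < 0) {
                perror("socket");
                return 1;
        }
        /* Overrides the busy_read sysctl default for this socket only. */
        if (setsockopt(fd, SOL_SOCKET, SO_BUSY_POLL, &usecs, sizeof(usecs)))
                perror("setsockopt(SO_BUSY_POLL)");
        return 0;
}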
busy_poll
----------------
-Low latency busy poll timeout for poll and select. (needs CONFIG_NET_LL_RX_POLL)
+Low latency busy poll timeout for poll and select. (needs CONFIG_NET_RX_BUSY_POLL)
Approximate time in us to busy loop waiting for events.
Recommended value depends on the number of sockets you poll on.
For several sockets, 50; for several hundred, 100.
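A minimal sketch of enabling the feature globally and then polling a couple of
sockets. Writing the sysctl is assumed to require root (or CAP_NET_ADMIN), the
value 50 follows the recommendation above, and busy polling only engages for
sockets whose traffic arrives through a NAPI driver queue; error handling is
kept to a minimum:

#include <stdio.h>
#include <poll.h>
#include <sys/socket.h>
#include <netinet/in.h>

int main(void)
{
        FILE *f = fopen("/proc/sys/net/core/busy_poll", "w");
        struct pollfd pfds[2];
        int i;

        if (f) {
                fprintf(f, "50\n");     /* ~50 us of busy looping per poll() */
                fclose(f);
        }

        for (i = 0; i < 2; i++) {
                pfds[i].fd = socket(AF_INET, SOCK_DGRAM, 0);
                pfds[i].events = POLLIN;
        }
        /* With busy_poll > 0, the kernel busy loops on the device queues
         * backing these sockets before sleeping, trading CPU for latency. */
        poll(pfds, 2, 1000);
        return 0;
}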
struct napi_struct napi;
-#ifdef CONFIG_NET_LL_RX_POLL
+#ifdef CONFIG_NET_RX_BUSY_POLL
unsigned int state;
#define BNX2X_FP_STATE_IDLE 0
#define BNX2X_FP_STATE_NAPI (1 << 0) /* NAPI owns this FP */
#define BNX2X_FP_USER_PEND (BNX2X_FP_STATE_POLL | BNX2X_FP_STATE_POLL_YIELD)
/* protect state */
spinlock_t lock;
-#endif /* CONFIG_NET_LL_RX_POLL */
+#endif /* CONFIG_NET_RX_BUSY_POLL */
union host_hc_status_block status_blk;
/* chip independent shortcuts into sb structure */
#define bnx2x_fp_stats(bp, fp) (&((bp)->fp_stats[(fp)->index]))
#define bnx2x_fp_qstats(bp, fp) (&((bp)->fp_stats[(fp)->index].eth_q_stats))
-#ifdef CONFIG_NET_LL_RX_POLL
+#ifdef CONFIG_NET_RX_BUSY_POLL
static inline void bnx2x_fp_init_lock(struct bnx2x_fastpath *fp)
{
spin_lock_init(&fp->lock);
{
return false;
}
-#endif /* CONFIG_NET_LL_RX_POLL */
+#endif /* CONFIG_NET_RX_BUSY_POLL */
/* Use 2500 as a mini-jumbo MTU for FCoE */
#define BNX2X_FCOE_MINI_JUMBO_MTU 2500
return work_done;
}
-#ifdef CONFIG_NET_LL_RX_POLL
+#ifdef CONFIG_NET_RX_BUSY_POLL
/* must be called with local_bh_disable()d */
int bnx2x_low_latency_recv(struct napi_struct *napi)
{
.ndo_fcoe_get_wwn = bnx2x_fcoe_get_wwn,
#endif
-#ifdef CONFIG_NET_LL_RX_POLL
+#ifdef CONFIG_NET_RX_BUSY_POLL
.ndo_busy_poll = bnx2x_low_latency_recv,
#endif
};
#include <net/busy_poll.h>
-#ifdef CONFIG_NET_LL_RX_POLL
+#ifdef CONFIG_NET_RX_BUSY_POLL
#define LL_EXTENDED_STATS
#endif
/* common prefix used by pr_<> macros */
struct rcu_head rcu; /* to avoid race with update stats on free */
char name[IFNAMSIZ + 9];
-#ifdef CONFIG_NET_LL_RX_POLL
+#ifdef CONFIG_NET_RX_BUSY_POLL
unsigned int state;
#define IXGBE_QV_STATE_IDLE 0
#define IXGBE_QV_STATE_NAPI 1 /* NAPI owns this QV */
#define IXGBE_QV_YIELD (IXGBE_QV_STATE_NAPI_YIELD | IXGBE_QV_STATE_POLL_YIELD)
#define IXGBE_QV_USER_PEND (IXGBE_QV_STATE_POLL | IXGBE_QV_STATE_POLL_YIELD)
spinlock_t lock;
-#endif /* CONFIG_NET_LL_RX_POLL */
+#endif /* CONFIG_NET_RX_BUSY_POLL */
/* for dynamic allocation of rings associated with this q_vector */
struct ixgbe_ring ring[0] ____cacheline_internodealigned_in_smp;
};
-#ifdef CONFIG_NET_LL_RX_POLL
+#ifdef CONFIG_NET_RX_BUSY_POLL
static inline void ixgbe_qv_init_lock(struct ixgbe_q_vector *q_vector)
{
WARN_ON(!(q_vector->state & IXGBE_QV_LOCKED));
return q_vector->state & IXGBE_QV_USER_PEND;
}
-#else /* CONFIG_NET_LL_RX_POLL */
+#else /* CONFIG_NET_RX_BUSY_POLL */
static inline void ixgbe_qv_init_lock(struct ixgbe_q_vector *q_vector)
{
}
{
return false;
}
-#endif /* CONFIG_NET_LL_RX_POLL */
+#endif /* CONFIG_NET_RX_BUSY_POLL */
#ifdef CONFIG_IXGBE_HWMON
return total_rx_packets;
}
-#ifdef CONFIG_NET_LL_RX_POLL
+#ifdef CONFIG_NET_RX_BUSY_POLL
/* must be called with local_bh_disable()d */
static int ixgbe_low_latency_recv(struct napi_struct *napi)
{
return found;
}
-#endif /* CONFIG_NET_LL_RX_POLL */
+#endif /* CONFIG_NET_RX_BUSY_POLL */
/**
* ixgbe_configure_msix - Configure MSI-X hardware
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = ixgbe_netpoll,
#endif
-#ifdef CONFIG_NET_LL_RX_POLL
+#ifdef CONFIG_NET_RX_BUSY_POLL
.ndo_busy_poll = ixgbe_low_latency_recv,
#endif
#ifdef IXGBE_FCOE
case ETH_SS_STATS:
return (priv->stats_bitmap ? bit_count : NUM_ALL_STATS) +
(priv->tx_ring_num * 2) +
-#ifdef CONFIG_NET_LL_RX_POLL
+#ifdef CONFIG_NET_RX_BUSY_POLL
(priv->rx_ring_num * 5);
#else
(priv->rx_ring_num * 2);
for (i = 0; i < priv->rx_ring_num; i++) {
data[index++] = priv->rx_ring[i].packets;
data[index++] = priv->rx_ring[i].bytes;
-#ifdef CONFIG_NET_LL_RX_POLL
+#ifdef CONFIG_NET_RX_BUSY_POLL
data[index++] = priv->rx_ring[i].yields;
data[index++] = priv->rx_ring[i].misses;
data[index++] = priv->rx_ring[i].cleaned;
"rx%d_packets", i);
sprintf(data + (index++) * ETH_GSTRING_LEN,
"rx%d_bytes", i);
-#ifdef CONFIG_NET_LL_RX_POLL
+#ifdef CONFIG_NET_RX_BUSY_POLL
sprintf(data + (index++) * ETH_GSTRING_LEN,
"rx%d_napi_yield", i);
sprintf(data + (index++) * ETH_GSTRING_LEN,
return 0;
}
-#ifdef CONFIG_NET_LL_RX_POLL
+#ifdef CONFIG_NET_RX_BUSY_POLL
/* must be called with local_bh_disable()d */
static int mlx4_en_low_latency_recv(struct napi_struct *napi)
{
return done;
}
-#endif /* CONFIG_NET_LL_RX_POLL */
+#endif /* CONFIG_NET_RX_BUSY_POLL */
#ifdef CONFIG_RFS_ACCEL
#ifdef CONFIG_RFS_ACCEL
.ndo_rx_flow_steer = mlx4_en_filter_rfs,
#endif
-#ifdef CONFIG_NET_LL_RX_POLL
+#ifdef CONFIG_NET_RX_BUSY_POLL
.ndo_busy_poll = mlx4_en_low_latency_recv,
#endif
};
void *rx_info;
unsigned long bytes;
unsigned long packets;
-#ifdef CONFIG_NET_LL_RX_POLL
+#ifdef CONFIG_NET_RX_BUSY_POLL
unsigned long yields;
unsigned long misses;
unsigned long cleaned;
struct mlx4_cqe *buf;
#define MLX4_EN_OPCODE_ERROR 0x1e
-#ifdef CONFIG_NET_LL_RX_POLL
+#ifdef CONFIG_NET_RX_BUSY_POLL
unsigned int state;
#define MLX4_EN_CQ_STATE_IDLE 0
#define MLX4_EN_CQ_STATE_NAPI 1 /* NAPI owns this CQ */
#define CQ_YIELD (MLX4_EN_CQ_STATE_NAPI_YIELD | MLX4_EN_CQ_STATE_POLL_YIELD)
#define CQ_USER_PEND (MLX4_EN_CQ_STATE_POLL | MLX4_EN_CQ_STATE_POLL_YIELD)
spinlock_t poll_lock; /* protects from LLS/napi conflicts */
-#endif /* CONFIG_NET_LL_RX_POLL */
+#endif /* CONFIG_NET_RX_BUSY_POLL */
};
struct mlx4_en_port_profile {
struct rcu_head rcu;
};
-#ifdef CONFIG_NET_LL_RX_POLL
+#ifdef CONFIG_NET_RX_BUSY_POLL
static inline void mlx4_en_cq_init_lock(struct mlx4_en_cq *cq)
{
spin_lock_init(&cq->poll_lock);
{
return false;
}
-#endif /* CONFIG_NET_LL_RX_POLL */
+#endif /* CONFIG_NET_RX_BUSY_POLL */
#define MLX4_EN_WOL_DO_MODIFY (1ULL << 63)
gfp_t gfp);
void (*ndo_netpoll_cleanup)(struct net_device *dev);
#endif
-#ifdef CONFIG_NET_LL_RX_POLL
+#ifdef CONFIG_NET_RX_BUSY_POLL
int (*ndo_busy_poll)(struct napi_struct *dev);
#endif
int (*ndo_set_vf_mac)(struct net_device *dev,
/* 7/9 bit hole (depending on ndisc_nodetype presence) */
kmemcheck_bitfield_end(flags2);
-#if defined CONFIG_NET_DMA || defined CONFIG_NET_LL_RX_POLL
+#if defined CONFIG_NET_DMA || defined CONFIG_NET_RX_BUSY_POLL
union {
unsigned int napi_id;
dma_cookie_t dma_cookie;
#include <linux/netdevice.h>
#include <net/ip.h>
-#ifdef CONFIG_NET_LL_RX_POLL
+#ifdef CONFIG_NET_RX_BUSY_POLL
struct napi_struct;
extern unsigned int sysctl_net_busy_read __read_mostly;
sk->sk_napi_id = skb->napi_id;
}
-#else /* CONFIG_NET_LL_RX_POLL */
+#else /* CONFIG_NET_RX_BUSY_POLL */
static inline unsigned long net_busy_loop_on(void)
{
return 0;
return false;
}
-#endif /* CONFIG_NET_LL_RX_POLL */
+#endif /* CONFIG_NET_RX_BUSY_POLL */
#endif /* _LINUX_NET_BUSY_POLL_H */
#ifdef CONFIG_RPS
__u32 sk_rxhash;
#endif
-#ifdef CONFIG_NET_LL_RX_POLL
+#ifdef CONFIG_NET_RX_BUSY_POLL
unsigned int sk_napi_id;
unsigned int sk_ll_usec;
#endif
Cgroup subsystem for use in assigning processes to network priorities on
a per-interface basis
-config NET_LL_RX_POLL
+config NET_RX_BUSY_POLL
boolean
default y
skb_copy_secmark(new, old);
-#ifdef CONFIG_NET_LL_RX_POLL
+#ifdef CONFIG_NET_RX_BUSY_POLL
new->napi_id = old->napi_id;
#endif
}
sock_valbool_flag(sk, SOCK_SELECT_ERR_QUEUE, valbool);
break;
-#ifdef CONFIG_NET_LL_RX_POLL
+#ifdef CONFIG_NET_RX_BUSY_POLL
case SO_BUSY_POLL:
/* allow unprivileged users to decrease the value */
if ((val > sk->sk_ll_usec) && !capable(CAP_NET_ADMIN))
v.val = sock_flag(sk, SOCK_SELECT_ERR_QUEUE);
break;
-#ifdef CONFIG_NET_LL_RX_POLL
+#ifdef CONFIG_NET_RX_BUSY_POLL
case SO_BUSY_POLL:
v.val = sk->sk_ll_usec;
break;
sk->sk_stamp = ktime_set(-1L, 0);
-#ifdef CONFIG_NET_LL_RX_POLL
+#ifdef CONFIG_NET_RX_BUSY_POLL
sk->sk_napi_id = 0;
sk->sk_ll_usec = sysctl_net_busy_read;
#endif
.proc_handler = flow_limit_table_len_sysctl
},
#endif /* CONFIG_NET_FLOW_LIMIT */
-#ifdef CONFIG_NET_LL_RX_POLL
+#ifdef CONFIG_NET_RX_BUSY_POLL
{
.procname = "busy_poll",
.data = &sysctl_net_busy_poll,
#include <linux/atalk.h>
#include <net/busy_poll.h>
-#ifdef CONFIG_NET_LL_RX_POLL
+#ifdef CONFIG_NET_RX_BUSY_POLL
unsigned int sysctl_net_busy_read __read_mostly;
unsigned int sysctl_net_busy_poll __read_mostly;
#endif