#ifdef CONFIG_RPS
struct kobject kobj;
#endif
-
+#if defined(CONFIG_XPS) && defined(CONFIG_NUMA)
+ int numa_node;
+#endif
/*
* write mostly part
*/
u64 tx_dropped;
} ____cacheline_aligned_in_smp;
+
+static inline int netdev_queue_numa_node_read(const struct netdev_queue *q)
+{
+#if defined(CONFIG_XPS) && defined(CONFIG_NUMA)
+ return q->numa_node;
+#else
+ return -1; /* no node information without CONFIG_XPS && CONFIG_NUMA */
+#endif
+}
+
+static inline void netdev_queue_numa_node_write(struct netdev_queue *q, int node)
+{
+#if defined(CONFIG_XPS) && defined(CONFIG_NUMA)
+ q->numa_node = node;
+#endif
+}
+
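A minimal usage sketch for these accessors (hypothetical caller, not part of the patch): with CONFIG_XPS && CONFIG_NUMA the read helper returns the recorded node, otherwise it compiles down to a constant -1, which the *_node allocators accept as "no preference".

    /* Hypothetical driver-side use: place per-queue state on the
     * queue's node; -1 degrades to an ordinary allocation.
     * "my_ring" and "txq" are illustrative names only.
     */
    struct my_ring *ring = kzalloc_node(sizeof(*ring), GFP_KERNEL,
                                        netdev_queue_numa_node_read(txq));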
#ifdef CONFIG_RPS
/*
* This structure holds an RPS map which can be of variable length. The
}
dev->_tx = tx;
- for (i = 0; i < count; i++)
+ for (i = 0; i < count; i++) {
+ netdev_queue_numa_node_write(&tx[i], -1); /* no preference until XPS is set */
tx[i].dev = dev;
-
+ }
return 0;
}
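The helpers behave consistently here: whether or not CONFIG_XPS && CONFIG_NUMA is set, a freshly allocated queue reads back -1 until store_xps_map() assigns a real node. An illustrative (not in-tree) assertion of that invariant:

    /* Illustration only: right after netif_alloc_netdev_queues(),
     * no tx queue carries a node preference yet.
     */
    WARN_ON(netdev_queue_numa_node_read(&dev->_tx[0]) != -1);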
struct xps_map *map, *new_map;
struct xps_dev_maps *dev_maps, *new_dev_maps;
int nonempty = 0;
+ int numa_node = -2; /* -2: unset; -1: CPUs span multiple nodes */
if (!capable(CAP_NET_ADMIN))
return -EPERM;
pos = map_len = alloc_len = 0;
need_set = cpu_isset(cpu, *mask) && cpu_online(cpu);
-
+#ifdef CONFIG_NUMA
+ if (need_set) {
+ /* The first mapped CPU sets the node; a later CPU on a
+ * different node collapses the result to -1.
+ */
+ if (numa_node == -2)
+ numa_node = cpu_to_node(cpu);
+ else if (numa_node != cpu_to_node(cpu))
+ numa_node = -1;
+ }
+#endif
if (need_set && pos >= map_len) {
/* Need to add queue to this CPU's map */
if (map_len >= alloc_len) {
if (dev_maps)
call_rcu(&dev_maps->rcu, xps_dev_maps_release);
+ /* Record a node only when every mapped CPU agreed on one */
+ netdev_queue_numa_node_write(queue, (numa_node >= 0) ? numa_node : -1);
+
mutex_unlock(&xps_map_mutex);
free_cpumask_var(mask);
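The sentinel values fold the whole CPU mask into a single answer; the rule, restated as a standalone sketch (hypothetical helper, illustration only):

    /* -2: no CPU seen yet; the first CPU's node wins; any
     * disagreement afterwards collapses the result to -1.
     */
    static int fold_numa_node(int acc, int cpu_node)
    {
        if (acc == -2)
            return cpu_node;
        return (acc == cpu_node) ? acc : -1;
    }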
size = QDISC_ALIGN(sizeof(*sch));
size += ops->priv_size + (QDISC_ALIGNTO - 1);
- p = kzalloc(size, GFP_KERNEL);
+ /* Allocate the qdisc on the queue's preferred NUMA node (-1: any) */
+ p = kzalloc_node(size, GFP_KERNEL,
+ netdev_queue_numa_node_read(dev_queue));
+
if (!p)
goto errout;
sch = (struct Qdisc *) QDISC_ALIGN((unsigned long) p);
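With no XPS map configured the queue still reads back -1, and kzalloc_node() treats -1 as "no preference", so this hunk preserves the old kzalloc() behaviour; once a map pins the queue to a node, the qdisc (and the private data appended to it) land on that node. A hypothetical before/after illustration:

    /* Illustration only: assume store_xps_map() recorded node 1 for
     * this queue. The qdisc is then allocated on node 1 regardless
     * of which CPU runs the sysfs write.
     */
    netdev_queue_numa_node_write(txq, 1);
    p = kzalloc_node(size, GFP_KERNEL,
                     netdev_queue_numa_node_read(txq)); /* node 1 */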