ipv4: reduce percpu needs for icmpmsg mibs
authorEric Dumazet <eric.dumazet@gmail.com>
Tue, 8 Nov 2011 13:04:43 +0000 (13:04 +0000)
committerDavid S. Miller <davem@davemloft.net>
Wed, 9 Nov 2011 21:04:20 +0000 (16:04 -0500)
Reading /proc/net/snmp on a machine with many cpus is very expensive
(it can take ~88000 us).

This is because the ICMPMSG MIB uses 4096 bytes per cpu, and folding the
values for all possible cpus can read up to 16 Mbytes of memory
(4096 bytes x 4096 possible cpus with NR_CPUS=4096).
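
For reference, each counter shown in /proc/net/snmp is folded with a loop
over all possible cpus, roughly like the sketch below (a simplified,
illustrative version of the kernel's snmp_fold_field(); the helper name
fold_percpu_counter is made up for this sketch):

	/*
	 * Sum one per-cpu counter: for every counter displayed, walk all
	 * possible cpus and add up their private copies, touching at least
	 * one cache line per cpu per counter.
	 */
	static unsigned long fold_percpu_counter(void __percpu *mib, int offt)
	{
		unsigned long res = 0;
		int cpu;

		for_each_possible_cpu(cpu)
			res += *((unsigned long *)per_cpu_ptr(mib, cpu) + offt);

		return res;
	}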

ICMP messages are not considered a fast path on a typical server, and in
practice only a few cpus handle them anyway. We can afford an atomic
operation instead of percpu data.

This saves 4096 bytes per cpu and per network namespace.
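
The SNMP_INC_STATS_ATOMIC_LONG() and DEFINE_SNMP_STAT_ATOMIC() helpers used
in the hunks below are assumed to be thin wrappers in include/net/snmp.h,
roughly as sketched here (illustrative only; the authoritative definitions
live in that header):

	/* Declare a plain (non percpu) pointer to the MIB structure ... */
	#define DEFINE_SNMP_STAT_ATOMIC(type, name)	\
		__typeof__(type) *name

	/* ... and bump a counter with a single atomic RMW on shared memory. */
	#define SNMP_INC_STATS_ATOMIC_LONG(mib, field)	\
		atomic_long_inc(&(mib)->mibs[field])

With that, reading a counter for /proc becomes a plain atomic_long_read()
on the shared array, so no per-cpu fold is needed.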

Signed-off-by: Eric Dumazet <eric.dumazet@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
include/net/icmp.h
include/net/netns/mib.h
include/net/snmp.h
net/ipv4/af_inet.c
net/ipv4/proc.c

index f0698b955b73c7ab41051fdf6508f4ab827932c0..75d615649071e39688b7f12a0e8a9c4066b8b31d 100644 (file)
@@ -31,8 +31,8 @@ struct icmp_err {
 extern const struct icmp_err icmp_err_convert[];
 #define ICMP_INC_STATS(net, field)     SNMP_INC_STATS((net)->mib.icmp_statistics, field)
 #define ICMP_INC_STATS_BH(net, field)  SNMP_INC_STATS_BH((net)->mib.icmp_statistics, field)
-#define ICMPMSGOUT_INC_STATS(net, field)       SNMP_INC_STATS((net)->mib.icmpmsg_statistics, field+256)
-#define ICMPMSGIN_INC_STATS_BH(net, field)     SNMP_INC_STATS_BH((net)->mib.icmpmsg_statistics, field)
+#define ICMPMSGOUT_INC_STATS(net, field)       SNMP_INC_STATS_ATOMIC_LONG((net)->mib.icmpmsg_statistics, field+256)
+#define ICMPMSGIN_INC_STATS_BH(net, field)     SNMP_INC_STATS_ATOMIC_LONG((net)->mib.icmpmsg_statistics, field)
 
 struct dst_entry;
 struct net_proto_family;
index 0b44112e2366e535a8d6ab7a8fcc726f4aad8de6..f360135cb69ffb1375789eb59c137faa11b058ad 100644 (file)
@@ -10,7 +10,7 @@ struct netns_mib {
        DEFINE_SNMP_STAT(struct udp_mib, udp_statistics);
        DEFINE_SNMP_STAT(struct udp_mib, udplite_statistics);
        DEFINE_SNMP_STAT(struct icmp_mib, icmp_statistics);
-       DEFINE_SNMP_STAT(struct icmpmsg_mib, icmpmsg_statistics);
+       DEFINE_SNMP_STAT_ATOMIC(struct icmpmsg_mib, icmpmsg_statistics);
 
 #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
        struct proc_dir_entry *proc_net_devsnmp6;
index 8f0f9ac0307ffc3ead58ce031f62d8ec8b2d1f7e..0feafa68da01eb72988e88b96d55688a66ca4283 100644 (file)
@@ -67,7 +67,7 @@ struct icmp_mib {
 
 #define ICMPMSG_MIB_MAX        __ICMPMSG_MIB_MAX
 struct icmpmsg_mib {
-       unsigned long   mibs[ICMPMSG_MIB_MAX];
+       atomic_long_t   mibs[ICMPMSG_MIB_MAX];
 };
 
 /* ICMP6 (IPv6-ICMP) */
index 1b5096a9875aae06db3fa9857800881dcc305d31..b2bbcd0ebd19c1f9d293545b31653b97b7bb4af1 100644 (file)
@@ -1572,9 +1572,9 @@ static __net_init int ipv4_mib_init_net(struct net *net)
                          sizeof(struct icmp_mib),
                          __alignof__(struct icmp_mib)) < 0)
                goto err_icmp_mib;
-       if (snmp_mib_init((void __percpu **)net->mib.icmpmsg_statistics,
-                         sizeof(struct icmpmsg_mib),
-                         __alignof__(struct icmpmsg_mib)) < 0)
+       net->mib.icmpmsg_statistics = kzalloc(sizeof(struct icmpmsg_mib),
+                                             GFP_KERNEL);
+       if (!net->mib.icmpmsg_statistics)
                goto err_icmpmsg_mib;
 
        tcp_mib_init(net);
@@ -1598,7 +1598,7 @@ err_tcp_mib:
 
 static __net_exit void ipv4_mib_exit_net(struct net *net)
 {
-       snmp_mib_free((void __percpu **)net->mib.icmpmsg_statistics);
+       kfree(net->mib.icmpmsg_statistics);
        snmp_mib_free((void __percpu **)net->mib.icmp_statistics);
        snmp_mib_free((void __percpu **)net->mib.udplite_statistics);
        snmp_mib_free((void __percpu **)net->mib.udp_statistics);
index 466ea8bb7a4d916e41c838389c7dcaf2b7f01b4a..961eed4f510a26e78d71dcf41fa265a414249dd0 100644 (file)
@@ -288,7 +288,7 @@ static void icmpmsg_put(struct seq_file *seq)
 
        count = 0;
        for (i = 0; i < ICMPMSG_MIB_MAX; i++) {
-               val = snmp_fold_field((void __percpu **) net->mib.icmpmsg_statistics, i);
+               val = atomic_long_read(&net->mib.icmpmsg_statistics->mibs[i]);
                if (val) {
                        type[count] = i;
                        vals[count++] = val;
@@ -307,6 +307,7 @@ static void icmp_put(struct seq_file *seq)
 {
        int i;
        struct net *net = seq->private;
+       atomic_long_t *ptr = net->mib.icmpmsg_statistics->mibs;
 
        seq_puts(seq, "\nIcmp: InMsgs InErrors");
        for (i=0; icmpmibmap[i].name != NULL; i++)
@@ -319,15 +320,13 @@ static void icmp_put(struct seq_file *seq)
                snmp_fold_field((void __percpu **) net->mib.icmp_statistics, ICMP_MIB_INERRORS));
        for (i=0; icmpmibmap[i].name != NULL; i++)
                seq_printf(seq, " %lu",
-                       snmp_fold_field((void __percpu **) net->mib.icmpmsg_statistics,
-                               icmpmibmap[i].index));
+                          atomic_long_read(ptr + icmpmibmap[i].index));
        seq_printf(seq, " %lu %lu",
                snmp_fold_field((void __percpu **) net->mib.icmp_statistics, ICMP_MIB_OUTMSGS),
                snmp_fold_field((void __percpu **) net->mib.icmp_statistics, ICMP_MIB_OUTERRORS));
        for (i=0; icmpmibmap[i].name != NULL; i++)
                seq_printf(seq, " %lu",
-                       snmp_fold_field((void __percpu **) net->mib.icmpmsg_statistics,
-                               icmpmibmap[i].index | 0x100));
+                          atomic_long_read(ptr + (icmpmibmap[i].index | 0x100)));
 }
 
 /*