diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c
index 2d0e265fef6e7f2c657c54d4db0fd10e010fa68b..6c72fbb7b49eb97d3574fd8c10174f9580752911 100644
--- a/net/ipv4/netfilter/ip_tables.c
+++ b/net/ipv4/netfilter/ip_tables.c
@@ -254,15 +254,13 @@ static void trace_packet(const struct sk_buff *skb,
                         const struct xt_table_info *private,
                         const struct ipt_entry *e)
 {
-       const void *table_base;
        const struct ipt_entry *root;
        const char *hookname, *chainname, *comment;
        const struct ipt_entry *iter;
        unsigned int rulenum = 0;
        struct net *net = dev_net(in ? in : out);
 
-       table_base = private->entries[smp_processor_id()];
-       root = get_entry(table_base, private->hook_entry[hook]);
+       root = get_entry(private->entries, private->hook_entry[hook]);
 
        hookname = chainname = hooknames[hook];
        comment = comments[NF_IP_TRACE_COMMENT_RULE];
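
Every hunk in this file serves one conversion: xt_table_info used to hold a complete copy of the ruleset for every possible CPU, and each reader picked its own copy via smp_processor_id(); now there is a single shared blob of rules, and only the counters are per CPU. A sketch of the structure change (include/linux/netfilter/x_tables.h in the same series; middle fields elided, so treat this as illustrative rather than verbatim):

    /* before: entries[cpu] pointed at that CPU's private ruleset copy */
    struct xt_table_info {
            unsigned int size;
            unsigned int number;
            /* ... hook_entry[], underflow[], jumpstack, ... */
            void *entries[1];
    };

    /* after: one shared copy of the rules, appended to the struct */
    struct xt_table_info {
            unsigned int size;
            unsigned int number;
            /* ... hook_entry[], underflow[], jumpstack, ... */
            unsigned char entries[0] __aligned(8);
    };

Per-table memory therefore drops from roughly size * nr_cpu_ids to size plus one struct xt_counters per rule per CPU, and trace_packet() above no longer needs a table_base lookup.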
@@ -331,7 +329,7 @@ ipt_do_table(struct sk_buff *skb,
         * pointer.
         */
        smp_read_barrier_depends();
-       table_base = private->entries[cpu];
+       table_base = private->entries;
        jumpstack  = (struct ipt_entry **)private->jumpstack[cpu];
        stackptr   = per_cpu_ptr(private->stackptr, cpu);
        origptr    = *stackptr;
@@ -345,6 +343,7 @@ ipt_do_table(struct sk_buff *skb,
        do {
                const struct xt_entry_target *t;
                const struct xt_entry_match *ematch;
+               struct xt_counters *counter;
 
                IP_NF_ASSERT(e);
                if (!ip_packet_match(ip, indev, outdev,
@@ -361,7 +360,8 @@ ipt_do_table(struct sk_buff *skb,
                                goto no_match;
                }
 
-               ADD_COUNTER(e->counters, skb->len, 1);
+               counter = xt_get_this_cpu_counter(&e->counters);
+               ADD_COUNTER(*counter, skb->len, 1);
 
                t = ipt_get_target(e);
                IP_NF_ASSERT(t->u.kernel.target);
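
In the hot path, per-packet accounting now goes through xt_get_this_cpu_counter(), which hands back this CPU's private counter pair on SMP and the counters embedded in the rule on UP. Roughly, from x_tables.h in the same series (a sketch, not verbatim):

    #define ADD_COUNTER(c, b, p) do { (c).bcnt += (b); (c).pcnt += (p); } while (0)

    static inline struct xt_counters *
    xt_get_this_cpu_counter(struct xt_counters *cnt)
    {
            /* on SMP, cnt->pcnt stores a percpu pointer; cast it back */
            if (nr_cpu_ids > 1)
                    return this_cpu_ptr((void __percpu *)(unsigned long)cnt->pcnt);

            /* UP: no percpu allocation, use the embedded counters */
            return cnt;
    }

Overloading the otherwise-unused pcnt field of the embedded struct xt_counters avoids growing struct ipt_entry, whose layout is visible to userspace.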
@@ -665,6 +665,10 @@ find_check_entry(struct ipt_entry *e, struct net *net, const char *name,
        if (ret)
                return ret;
 
+       e->counters.pcnt = xt_percpu_counter_alloc();
+       if (IS_ERR_VALUE(e->counters.pcnt))
+               return -ENOMEM;
+
        j = 0;
        mtpar.net       = net;
        mtpar.table     = name;
@@ -691,6 +695,7 @@ find_check_entry(struct ipt_entry *e, struct net *net, const char *name,
        ret = check_target(e, net, name);
        if (ret)
                goto err;
+
        return 0;
  err:
        module_put(t->u.kernel.target->me);
@@ -700,6 +705,9 @@ find_check_entry(struct ipt_entry *e, struct net *net, const char *name,
                        break;
                cleanup_match(ematch, net);
        }
+
+       xt_percpu_counter_free(e->counters.pcnt);
+
        return ret;
 }
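
find_check_entry() now allocates the percpu counter block before running the match/target checks, so the error unwind must release it again, hence the xt_percpu_counter_free() added above. The allocation helpers look roughly like this (same series, sketched from memory):

    static inline u64 xt_percpu_counter_alloc(void)
    {
            if (nr_cpu_ids > 1) {
                    void __percpu *res =
                            __alloc_percpu(sizeof(struct xt_counters),
                                           sizeof(struct xt_counters));

                    if (res == NULL)
                            return (u64) -ENOMEM;

                    return (__force u64) res;
            }

            return 0;   /* UP: nothing to allocate */
    }

    static inline void xt_percpu_counter_free(u64 pcnt)
    {
            if (nr_cpu_ids > 1)
                    free_percpu((void __percpu *) (unsigned long) pcnt);
    }

Returning 0 on UP and an encoded pointer on SMP is why the caller checks with IS_ERR_VALUE() rather than comparing against NULL.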
 
@@ -784,6 +792,7 @@ cleanup_entry(struct ipt_entry *e, struct net *net)
        if (par.target->destroy != NULL)
                par.target->destroy(&par);
        module_put(par.target->me);
+       xt_percpu_counter_free(e->counters.pcnt);
 }
 
 /* Checks and translates the user-supplied table segment (held in
@@ -866,12 +875,6 @@ translate_table(struct net *net, struct xt_table_info *newinfo, void *entry0,
                return ret;
        }
 
-       /* And one copy for every other CPU */
-       for_each_possible_cpu(i) {
-               if (newinfo->entries[i] && newinfo->entries[i] != entry0)
-                       memcpy(newinfo->entries[i], entry0, newinfo->size);
-       }
-
        return ret;
 }
 
@@ -887,14 +890,16 @@ get_counters(const struct xt_table_info *t,
                seqcount_t *s = &per_cpu(xt_recseq, cpu);
 
                i = 0;
-               xt_entry_foreach(iter, t->entries[cpu], t->size) {
+               xt_entry_foreach(iter, t->entries, t->size) {
+                       struct xt_counters *tmp;
                        u64 bcnt, pcnt;
                        unsigned int start;
 
+                       tmp = xt_get_per_cpu_counter(&iter->counters, cpu);
                        do {
                                start = read_seqcount_begin(s);
-                               bcnt = iter->counters.bcnt;
-                               pcnt = iter->counters.pcnt;
+                               bcnt = tmp->bcnt;
+                               pcnt = tmp->pcnt;
                        } while (read_seqcount_retry(s, start));
 
                        ADD_COUNTER(counters[i], bcnt, pcnt);
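
get_counters() keeps the xt_recseq retry loop because bcnt/pcnt are 64-bit values written without atomics, so a 32-bit reader could otherwise observe a torn update. What changes is only where the per-CPU data lives: the reader now walks the single shared ruleset and pulls each CPU's slot via a helper along these lines (sketch):

    static inline struct xt_counters *
    xt_get_per_cpu_counter(struct xt_counters *cnt, unsigned int cpu)
    {
            if (nr_cpu_ids > 1)
                    return per_cpu_ptr((void __percpu *)(unsigned long)cnt->pcnt,
                                       cpu);

            return cnt;
    }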
@@ -939,11 +944,7 @@ copy_entries_to_user(unsigned int total_size,
        if (IS_ERR(counters))
                return PTR_ERR(counters);
 
-       /* choose the copy that is on our node/cpu, ...
-        * This choice is lazy (because current thread is
-        * allowed to migrate to another cpu)
-        */
-       loc_cpu_entry = private->entries[raw_smp_processor_id()];
+       loc_cpu_entry = private->entries;
        if (copy_to_user(userptr, loc_cpu_entry, total_size) != 0) {
                ret = -EFAULT;
                goto free_counters;
@@ -1051,16 +1052,16 @@ static int compat_table_info(const struct xt_table_info *info,
                             struct xt_table_info *newinfo)
 {
        struct ipt_entry *iter;
-       void *loc_cpu_entry;
+       const void *loc_cpu_entry;
        int ret;
 
        if (!newinfo || !info)
                return -EINVAL;
 
-       /* we dont care about newinfo->entries[] */
+       /* we don't care about newinfo->entries */
        memcpy(newinfo, info, offsetof(struct xt_table_info, entries));
        newinfo->initial_entries = 0;
-       loc_cpu_entry = info->entries[raw_smp_processor_id()];
+       loc_cpu_entry = info->entries;
        xt_compat_init_offsets(AF_INET, info->number);
        xt_entry_foreach(iter, loc_cpu_entry, info->size) {
                ret = compat_calc_entry(iter, info, loc_cpu_entry, newinfo);
@@ -1181,7 +1182,6 @@ __do_replace(struct net *net, const char *name, unsigned int valid_hooks,
        struct xt_table *t;
        struct xt_table_info *oldinfo;
        struct xt_counters *counters;
-       void *loc_cpu_old_entry;
        struct ipt_entry *iter;
 
        ret = 0;
@@ -1224,8 +1224,7 @@ __do_replace(struct net *net, const char *name, unsigned int valid_hooks,
        get_counters(oldinfo, counters);
 
        /* Decrease module usage counts and free resource */
-       loc_cpu_old_entry = oldinfo->entries[raw_smp_processor_id()];
-       xt_entry_foreach(iter, loc_cpu_old_entry, oldinfo->size)
+       xt_entry_foreach(iter, oldinfo->entries, oldinfo->size)
                cleanup_entry(iter, net);
 
        xt_free_table_info(oldinfo);
@@ -1271,8 +1270,7 @@ do_replace(struct net *net, const void __user *user, unsigned int len)
        if (!newinfo)
                return -ENOMEM;
 
-       /* choose the copy that is on our node/cpu */
-       loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
+       loc_cpu_entry = newinfo->entries;
        if (copy_from_user(loc_cpu_entry, user + sizeof(tmp),
                           tmp.size) != 0) {
                ret = -EFAULT;
@@ -1303,7 +1301,7 @@ static int
 do_add_counters(struct net *net, const void __user *user,
                 unsigned int len, int compat)
 {
-       unsigned int i, curcpu;
+       unsigned int i;
        struct xt_counters_info tmp;
        struct xt_counters *paddc;
        unsigned int num_counters;
@@ -1313,7 +1311,6 @@ do_add_counters(struct net *net, const void __user *user,
        struct xt_table *t;
        const struct xt_table_info *private;
        int ret = 0;
-       void *loc_cpu_entry;
        struct ipt_entry *iter;
        unsigned int addend;
 #ifdef CONFIG_COMPAT
@@ -1369,12 +1366,12 @@ do_add_counters(struct net *net, const void __user *user,
        }
 
        i = 0;
-       /* Choose the copy that is on our node */
-       curcpu = smp_processor_id();
-       loc_cpu_entry = private->entries[curcpu];
        addend = xt_write_recseq_begin();
-       xt_entry_foreach(iter, loc_cpu_entry, private->size) {
-               ADD_COUNTER(iter->counters, paddc[i].bcnt, paddc[i].pcnt);
+       xt_entry_foreach(iter, private->entries, private->size) {
+               struct xt_counters *tmp;
+
+               tmp = xt_get_this_cpu_counter(&iter->counters);
+               ADD_COUNTER(*tmp, paddc[i].bcnt, paddc[i].pcnt);
                ++i;
        }
        xt_write_recseq_end(addend);
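
do_add_counters() is the writer side of the seqcount that the reader loop in get_counters() retries on. It no longer matters which CPU services the syscall: the added values land in that CPU's slot, and readers sum all slots anyway. For reference, the write-side bracketing is roughly as follows (x_tables.h; callers run with BH disabled):

    static inline unsigned int xt_write_recseq_begin(void)
    {
            unsigned int addend;

            /* low bit set means "write in progress" on this CPU;
             * addend is 1 for the outermost writer, 0 when reentrant
             */
            addend = (__this_cpu_read(xt_recseq.sequence) + 1) & 1;
            __this_cpu_add(xt_recseq.sequence, addend);
            smp_wmb();

            return addend;
    }

    static inline void xt_write_recseq_end(unsigned int addend)
    {
            smp_wmb();
            __this_cpu_add(xt_recseq.sequence, addend);
    }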
@@ -1444,7 +1441,6 @@ static int
 compat_find_calc_match(struct xt_entry_match *m,
                       const char *name,
                       const struct ipt_ip *ip,
-                      unsigned int hookmask,
                       int *size)
 {
        struct xt_match *match;
@@ -1513,8 +1509,7 @@ check_compat_entry_size_and_hooks(struct compat_ipt_entry *e,
        entry_offset = (void *)e - (void *)base;
        j = 0;
        xt_ematch_foreach(ematch, e) {
-               ret = compat_find_calc_match(ematch, name,
-                                            &e->ip, e->comefrom, &off);
+               ret = compat_find_calc_match(ematch, name, &e->ip, &off);
                if (ret != 0)
                        goto release_matches;
                ++j;
@@ -1610,6 +1605,10 @@ compat_check_entry(struct ipt_entry *e, struct net *net, const char *name)
        unsigned int j;
        int ret = 0;
 
+       e->counters.pcnt = xt_percpu_counter_alloc();
+       if (IS_ERR_VALUE(e->counters.pcnt))
+               return -ENOMEM;
+
        j = 0;
        mtpar.net       = net;
        mtpar.table     = name;
@@ -1634,6 +1633,9 @@ compat_check_entry(struct ipt_entry *e, struct net *net, const char *name)
                        break;
                cleanup_match(ematch, net);
        }
+
+       xt_percpu_counter_free(e->counters.pcnt);
+
        return ret;
 }
 
@@ -1718,7 +1720,7 @@ translate_compat_table(struct net *net,
                newinfo->hook_entry[i] = info->hook_entry[i];
                newinfo->underflow[i] = info->underflow[i];
        }
-       entry1 = newinfo->entries[raw_smp_processor_id()];
+       entry1 = newinfo->entries;
        pos = entry1;
        size = total_size;
        xt_entry_foreach(iter0, entry0, total_size) {
@@ -1770,11 +1772,6 @@ translate_compat_table(struct net *net,
                return ret;
        }
 
-       /* And one copy for every other CPU */
-       for_each_possible_cpu(i)
-               if (newinfo->entries[i] && newinfo->entries[i] != entry1)
-                       memcpy(newinfo->entries[i], entry1, newinfo->size);
-
        *pinfo = newinfo;
        *pentry0 = entry1;
        xt_free_table_info(info);
@@ -1821,8 +1818,7 @@ compat_do_replace(struct net *net, void __user *user, unsigned int len)
        if (!newinfo)
                return -ENOMEM;
 
-       /* choose the copy that is on our node/cpu */
-       loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
+       loc_cpu_entry = newinfo->entries;
        if (copy_from_user(loc_cpu_entry, user + sizeof(tmp),
                           tmp.size) != 0) {
                ret = -EFAULT;
@@ -1893,7 +1889,6 @@ compat_copy_entries_to_user(unsigned int total_size, struct xt_table *table,
        void __user *pos;
        unsigned int size;
        int ret = 0;
-       const void *loc_cpu_entry;
        unsigned int i = 0;
        struct ipt_entry *iter;
 
@@ -1901,14 +1896,9 @@ compat_copy_entries_to_user(unsigned int total_size, struct xt_table *table,
        if (IS_ERR(counters))
                return PTR_ERR(counters);
 
-       /* choose the copy that is on our node/cpu, ...
-        * This choice is lazy (because current thread is
-        * allowed to migrate to another cpu)
-        */
-       loc_cpu_entry = private->entries[raw_smp_processor_id()];
        pos = userptr;
        size = total_size;
-       xt_entry_foreach(iter, loc_cpu_entry, total_size) {
+       xt_entry_foreach(iter, private->entries, total_size) {
                ret = compat_copy_entry_to_user(iter, &pos,
                                                &size, counters, i++);
                if (ret != 0)
@@ -2083,8 +2073,7 @@ struct xt_table *ipt_register_table(struct net *net,
                goto out;
        }
 
-       /* choose the copy on our node/cpu, but dont care about preemption */
-       loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
+       loc_cpu_entry = newinfo->entries;
        memcpy(loc_cpu_entry, repl->entries, repl->size);
 
        ret = translate_table(net, newinfo, loc_cpu_entry, repl);
@@ -2115,7 +2104,7 @@ void ipt_unregister_table(struct net *net, struct xt_table *table)
        private = xt_unregister_table(table);
 
        /* Decrease module usage counts and free resources */
-       loc_cpu_entry = private->entries[raw_smp_processor_id()];
+       loc_cpu_entry = private->entries;
        xt_entry_foreach(iter, loc_cpu_entry, private->size)
                cleanup_entry(iter, net);
        if (private->number > private->initial_entries)
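
Taken together, the pattern is: shared read-mostly rules, per-CPU write-hot counters, reader-side summation. Below is a self-contained userspace toy that models the idea only; all names are invented and nothing here is kernel API. The real code additionally uses percpu allocations to avoid false sharing between the slots and a seqcount to guard against torn 64-bit reads, both omitted here.

    /* build with: cc -O2 -pthread toy.c */
    #include <pthread.h>
    #include <stdint.h>
    #include <stdio.h>

    #define NCPUS 4
    #define PKTS  1000000

    struct toy_counters { uint64_t bcnt, pcnt; };

    struct toy_rule {
            /* match data would live here, read-only and shared */
            struct toy_counters cnt[NCPUS];  /* one slot per "CPU" */
    };

    static struct toy_rule rule;             /* the single shared rule */

    static void *packet_path(void *arg)      /* models ipt_do_table() */
    {
            int cpu = (int)(intptr_t)arg;
            int i;

            for (i = 0; i < PKTS; i++) {
                    /* hot path touches only this CPU's counter slot */
                    rule.cnt[cpu].bcnt += 64;
                    rule.cnt[cpu].pcnt += 1;
            }
            return NULL;
    }

    int main(void)
    {
            pthread_t tid[NCPUS];
            struct toy_counters sum = { 0, 0 };
            int c;

            for (c = 0; c < NCPUS; c++)
                    pthread_create(&tid[c], NULL, packet_path,
                                   (void *)(intptr_t)c);
            for (c = 0; c < NCPUS; c++)
                    pthread_join(tid[c], NULL);

            /* reader side, like get_counters(): sum every slot */
            for (c = 0; c < NCPUS; c++) {
                    sum.bcnt += rule.cnt[c].bcnt;
                    sum.pcnt += rule.cnt[c].pcnt;
            }
            printf("pcnt=%llu bcnt=%llu\n",
                   (unsigned long long)sum.pcnt,
                   (unsigned long long)sum.bcnt);
            return 0;
    }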