netfilter: nf_conntrack: push zone object into functions instead of a bare u16 id
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
index 3c20d02aee738c5293a5b449f28ebff596c7232d..eedf0495f11f5eb93c4bce1e2ffbb1b04cc273b3 100644
@@ -126,7 +126,7 @@ EXPORT_PER_CPU_SYMBOL(nf_conntrack_untracked);
 unsigned int nf_conntrack_hash_rnd __read_mostly;
 EXPORT_SYMBOL_GPL(nf_conntrack_hash_rnd);
 
-static u32 hash_conntrack_raw(const struct nf_conntrack_tuple *tuple, u16 zone)
+static u32 hash_conntrack_raw(const struct nf_conntrack_tuple *tuple)
 {
        unsigned int n;
 
@@ -135,7 +135,7 @@ static u32 hash_conntrack_raw(const struct nf_conntrack_tuple *tuple, u16 zone)
         * three bytes manually.
         */
        n = (sizeof(tuple->src) + sizeof(tuple->dst.u3)) / sizeof(u32);
-       return jhash2((u32 *)tuple, n, zone ^ nf_conntrack_hash_rnd ^
+       return jhash2((u32 *)tuple, n, nf_conntrack_hash_rnd ^
                      (((__force __u16)tuple->dst.u.all << 16) |
                      tuple->dst.protonum));
 }
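
With the zone id no longer folded into the jhash2() seed, conntracks that carry the same tuple in different zones now land in the same hash bucket; telling them apart becomes the job of the compare step (see nf_ct_key_equal() below), which is what lets the zone grow from a bare u16 into a structured object.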
@@ -151,15 +151,15 @@ static u32 hash_bucket(u32 hash, const struct net *net)
 }
 
 static u_int32_t __hash_conntrack(const struct nf_conntrack_tuple *tuple,
-                                 u16 zone, unsigned int size)
+                                 unsigned int size)
 {
-       return __hash_bucket(hash_conntrack_raw(tuple, zone), size);
+       return __hash_bucket(hash_conntrack_raw(tuple), size);
 }
 
-static inline u_int32_t hash_conntrack(const struct net *net, u16 zone,
+static inline u_int32_t hash_conntrack(const struct net *net,
                                       const struct nf_conntrack_tuple *tuple)
 {
-       return __hash_conntrack(tuple, zone, net->ct.htable_size);
+       return __hash_conntrack(tuple, net->ct.htable_size);
 }
 
 bool
@@ -288,7 +288,9 @@ static void nf_ct_del_from_dying_or_unconfirmed_list(struct nf_conn *ct)
 }
 
 /* Released via destroy_conntrack() */
-struct nf_conn *nf_ct_tmpl_alloc(struct net *net, u16 zone, gfp_t flags)
+struct nf_conn *nf_ct_tmpl_alloc(struct net *net,
+                                const struct nf_conntrack_zone *zone,
+                                gfp_t flags)
 {
        struct nf_conn *tmpl;
 
@@ -299,24 +301,15 @@ struct nf_conn *nf_ct_tmpl_alloc(struct net *net, u16 zone, gfp_t flags)
        tmpl->status = IPS_TEMPLATE;
        write_pnet(&tmpl->ct_net, net);
 
-#ifdef CONFIG_NF_CONNTRACK_ZONES
-       if (zone) {
-               struct nf_conntrack_zone *nf_ct_zone;
+       if (nf_ct_zone_add(tmpl, flags, zone) < 0)
+               goto out_free;
 
-               nf_ct_zone = nf_ct_ext_add(tmpl, NF_CT_EXT_ZONE, flags);
-               if (!nf_ct_zone)
-                       goto out_free;
-               nf_ct_zone->id = zone;
-       }
-#endif
        atomic_set(&tmpl->ct_general.use, 0);
 
        return tmpl;
-#ifdef CONFIG_NF_CONNTRACK_ZONES
 out_free:
        kfree(tmpl);
        return NULL;
-#endif
 }
 EXPORT_SYMBOL_GPL(nf_ct_tmpl_alloc);
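
The open-coded CONFIG_NF_CONNTRACK_ZONES block is replaced by a call to nf_ct_zone_add(), which is defined outside this file. Judging from the call sites, it presumably wraps the old nf_ct_ext_add() sequence roughly as sketched below; nf_ct_zone_init() and the id/dir/flags members are assumptions inferred from how the zone object is used elsewhere in this diff:

	static inline int nf_ct_zone_add(struct nf_conn *ct, gfp_t flags,
					 const struct nf_conntrack_zone *info)
	{
	#ifdef CONFIG_NF_CONNTRACK_ZONES
		struct nf_conntrack_zone *nf_ct_zone;

		/* Attach a zone extension and copy the caller's zone into it. */
		nf_ct_zone = nf_ct_ext_add(ct, NF_CT_EXT_ZONE, flags);
		if (!nf_ct_zone)
			return -ENOMEM;

		nf_ct_zone_init(nf_ct_zone, info->id, info->dir, info->flags);
	#endif
		return 0;
	}

Returning 0 when zones are compiled out is what allows the #ifdef (and the conditional out_free label) to disappear from the callers.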
 
@@ -373,7 +366,6 @@ static void nf_ct_delete_from_lists(struct nf_conn *ct)
 {
        struct net *net = nf_ct_net(ct);
        unsigned int hash, reply_hash;
-       u16 zone = nf_ct_zone(ct);
        unsigned int sequence;
 
        nf_ct_helper_destroy(ct);
@@ -381,9 +373,9 @@ static void nf_ct_delete_from_lists(struct nf_conn *ct)
        local_bh_disable();
        do {
                sequence = read_seqcount_begin(&net->ct.generation);
-               hash = hash_conntrack(net, zone,
+               hash = hash_conntrack(net,
                                      &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
-               reply_hash = hash_conntrack(net, zone,
+               reply_hash = hash_conntrack(net,
                                           &ct->tuplehash[IP_CT_DIR_REPLY].tuple);
        } while (nf_conntrack_double_lock(net, hash, reply_hash, sequence));
 
@@ -431,8 +423,8 @@ static void death_by_timeout(unsigned long ul_conntrack)
 
 static inline bool
 nf_ct_key_equal(struct nf_conntrack_tuple_hash *h,
-                       const struct nf_conntrack_tuple *tuple,
-                       u16 zone)
+               const struct nf_conntrack_tuple *tuple,
+               const struct nf_conntrack_zone *zone)
 {
        struct nf_conn *ct = nf_ct_tuplehash_to_ctrack(h);
 
@@ -440,8 +432,8 @@ nf_ct_key_equal(struct nf_conntrack_tuple_hash *h,
         * so we need to check that the conntrack is confirmed
         */
        return nf_ct_tuple_equal(tuple, &h->tuple) &&
-               nf_ct_zone(ct) == zone &&
-               nf_ct_is_confirmed(ct);
+              nf_ct_zone_equal(ct, zone, NF_CT_DIRECTION(h)) &&
+              nf_ct_is_confirmed(ct);
 }
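
nf_ct_zone_equal() and NF_CT_DIRECTION() are likewise defined in a header outside this diff. The new direction argument means zone membership is now evaluated per tuple direction rather than once per conntrack. A plausible sketch, assuming the zone object carries a direction mask and that unmatched directions fall back to a default id:

	/* Direction of a tuple hash entry, recovered from the tuple itself. */
	#define NF_CT_DIRECTION(h) \
		((enum ip_conntrack_dir)(h)->tuple.dst.dir)

	static inline u16 nf_ct_zone_id(const struct nf_conntrack_zone *zone,
					enum ip_conntrack_dir dir)
	{
		/* Apply the zone id only to directions the zone covers. */
		return (zone->dir & (1 << dir)) ? zone->id : NF_CT_DEFAULT_ZONE_ID;
	}

	static inline bool nf_ct_zone_equal(const struct nf_conn *a,
					    const struct nf_conntrack_zone *b,
					    enum ip_conntrack_dir dir)
	{
		return nf_ct_zone_id(nf_ct_zone(a), dir) ==
		       nf_ct_zone_id(b, dir);
	}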
 
 /*
@@ -450,7 +442,7 @@ nf_ct_key_equal(struct nf_conntrack_tuple_hash *h,
  *   and recheck nf_ct_tuple_equal(tuple, &h->tuple)
  */
 static struct nf_conntrack_tuple_hash *
-____nf_conntrack_find(struct net *net, u16 zone,
+____nf_conntrack_find(struct net *net, const struct nf_conntrack_zone *zone,
                      const struct nf_conntrack_tuple *tuple, u32 hash)
 {
        struct nf_conntrack_tuple_hash *h;
@@ -486,7 +478,7 @@ begin:
 
 /* Find a connection corresponding to a tuple. */
 static struct nf_conntrack_tuple_hash *
-__nf_conntrack_find_get(struct net *net, u16 zone,
+__nf_conntrack_find_get(struct net *net, const struct nf_conntrack_zone *zone,
                        const struct nf_conntrack_tuple *tuple, u32 hash)
 {
        struct nf_conntrack_tuple_hash *h;
@@ -513,11 +505,11 @@ begin:
 }
 
 struct nf_conntrack_tuple_hash *
-nf_conntrack_find_get(struct net *net, u16 zone,
+nf_conntrack_find_get(struct net *net, const struct nf_conntrack_zone *zone,
                      const struct nf_conntrack_tuple *tuple)
 {
        return __nf_conntrack_find_get(net, zone, tuple,
-                                      hash_conntrack_raw(tuple, zone));
+                                      hash_conntrack_raw(tuple));
 }
 EXPORT_SYMBOL_GPL(nf_conntrack_find_get);
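
Every caller of nf_conntrack_find_get() now has to supply a zone object instead of a u16. A hypothetical call site, assuming a shared default zone instance (here called nf_ct_zone_dflt) is provided for code that has no template:

	/* hypothetical lookup; net and tuple prepared by the caller */
	struct nf_conntrack_tuple_hash *h;

	h = nf_conntrack_find_get(net, &nf_ct_zone_dflt, &tuple);
	if (h)
		/* found a confirmed conntrack in the default zone; drop our ref */
		nf_ct_put(nf_ct_tuplehash_to_ctrack(h));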
 
@@ -536,11 +528,11 @@ static void __nf_conntrack_hash_insert(struct nf_conn *ct,
 int
 nf_conntrack_hash_check_insert(struct nf_conn *ct)
 {
+       const struct nf_conntrack_zone *zone;
        struct net *net = nf_ct_net(ct);
        unsigned int hash, reply_hash;
        struct nf_conntrack_tuple_hash *h;
        struct hlist_nulls_node *n;
-       u16 zone;
        unsigned int sequence;
 
        zone = nf_ct_zone(ct);
@@ -548,9 +540,9 @@ nf_conntrack_hash_check_insert(struct nf_conn *ct)
        local_bh_disable();
        do {
                sequence = read_seqcount_begin(&net->ct.generation);
-               hash = hash_conntrack(net, zone,
+               hash = hash_conntrack(net,
                                      &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
-               reply_hash = hash_conntrack(net, zone,
+               reply_hash = hash_conntrack(net,
                                           &ct->tuplehash[IP_CT_DIR_REPLY].tuple);
        } while (nf_conntrack_double_lock(net, hash, reply_hash, sequence));
 
@@ -558,12 +550,14 @@ nf_conntrack_hash_check_insert(struct nf_conn *ct)
        hlist_nulls_for_each_entry(h, n, &net->ct.hash[hash], hnnode)
                if (nf_ct_tuple_equal(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple,
                                      &h->tuple) &&
-                   zone == nf_ct_zone(nf_ct_tuplehash_to_ctrack(h)))
+                   nf_ct_zone_equal(nf_ct_tuplehash_to_ctrack(h), zone,
+                                    NF_CT_DIRECTION(h)))
                        goto out;
        hlist_nulls_for_each_entry(h, n, &net->ct.hash[reply_hash], hnnode)
                if (nf_ct_tuple_equal(&ct->tuplehash[IP_CT_DIR_REPLY].tuple,
                                      &h->tuple) &&
-                   zone == nf_ct_zone(nf_ct_tuplehash_to_ctrack(h)))
+                   nf_ct_zone_equal(nf_ct_tuplehash_to_ctrack(h), zone,
+                                    NF_CT_DIRECTION(h)))
                        goto out;
 
        add_timer(&ct->timeout);
@@ -588,6 +582,7 @@ EXPORT_SYMBOL_GPL(nf_conntrack_hash_check_insert);
 int
 __nf_conntrack_confirm(struct sk_buff *skb)
 {
+       const struct nf_conntrack_zone *zone;
        unsigned int hash, reply_hash;
        struct nf_conntrack_tuple_hash *h;
        struct nf_conn *ct;
@@ -596,7 +591,6 @@ __nf_conntrack_confirm(struct sk_buff *skb)
        struct hlist_nulls_node *n;
        enum ip_conntrack_info ctinfo;
        struct net *net;
-       u16 zone;
        unsigned int sequence;
 
        ct = nf_ct_get(skb, &ctinfo);
@@ -617,7 +611,7 @@ __nf_conntrack_confirm(struct sk_buff *skb)
                /* reuse the hash saved before */
                hash = *(unsigned long *)&ct->tuplehash[IP_CT_DIR_REPLY].hnnode.pprev;
                hash = hash_bucket(hash, net);
-               reply_hash = hash_conntrack(net, zone,
+               reply_hash = hash_conntrack(net,
                                           &ct->tuplehash[IP_CT_DIR_REPLY].tuple);
 
        } while (nf_conntrack_double_lock(net, hash, reply_hash, sequence));
@@ -649,12 +643,14 @@ __nf_conntrack_confirm(struct sk_buff *skb)
        hlist_nulls_for_each_entry(h, n, &net->ct.hash[hash], hnnode)
                if (nf_ct_tuple_equal(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple,
                                      &h->tuple) &&
-                   zone == nf_ct_zone(nf_ct_tuplehash_to_ctrack(h)))
+                   nf_ct_zone_equal(nf_ct_tuplehash_to_ctrack(h), zone,
+                                    NF_CT_DIRECTION(h)))
                        goto out;
        hlist_nulls_for_each_entry(h, n, &net->ct.hash[reply_hash], hnnode)
                if (nf_ct_tuple_equal(&ct->tuplehash[IP_CT_DIR_REPLY].tuple,
                                      &h->tuple) &&
-                   zone == nf_ct_zone(nf_ct_tuplehash_to_ctrack(h)))
+                   nf_ct_zone_equal(nf_ct_tuplehash_to_ctrack(h), zone,
+                                    NF_CT_DIRECTION(h)))
                        goto out;
 
        /* Timer relative to confirmation time, not original
@@ -707,11 +703,14 @@ nf_conntrack_tuple_taken(const struct nf_conntrack_tuple *tuple,
                         const struct nf_conn *ignored_conntrack)
 {
        struct net *net = nf_ct_net(ignored_conntrack);
+       const struct nf_conntrack_zone *zone;
        struct nf_conntrack_tuple_hash *h;
        struct hlist_nulls_node *n;
        struct nf_conn *ct;
-       u16 zone = nf_ct_zone(ignored_conntrack);
-       unsigned int hash = hash_conntrack(net, zone, tuple);
+       unsigned int hash;
+
+       zone = nf_ct_zone(ignored_conntrack);
+       hash = hash_conntrack(net, tuple);
 
        /* Disable BHs the entire time since we need to disable them at
         * least once for the stats anyway.
@@ -721,7 +720,7 @@ nf_conntrack_tuple_taken(const struct nf_conntrack_tuple *tuple,
                ct = nf_ct_tuplehash_to_ctrack(h);
                if (ct != ignored_conntrack &&
                    nf_ct_tuple_equal(tuple, &h->tuple) &&
-                   nf_ct_zone(ct) == zone) {
+                   nf_ct_zone_equal(ct, zone, NF_CT_DIRECTION(h))) {
                        NF_CT_STAT_INC(net, found);
                        rcu_read_unlock_bh();
                        return 1;
@@ -810,7 +809,8 @@ void init_nf_conntrack_hash_rnd(void)
 }
 
 static struct nf_conn *
-__nf_conntrack_alloc(struct net *net, u16 zone,
+__nf_conntrack_alloc(struct net *net,
+                    const struct nf_conntrack_zone *zone,
                     const struct nf_conntrack_tuple *orig,
                     const struct nf_conntrack_tuple *repl,
                     gfp_t gfp, u32 hash)
@@ -820,7 +820,7 @@ __nf_conntrack_alloc(struct net *net, u16 zone,
        if (unlikely(!nf_conntrack_hash_rnd)) {
                init_nf_conntrack_hash_rnd();
                /* recompute the hash as nf_conntrack_hash_rnd is initialized */
-               hash = hash_conntrack_raw(orig, zone);
+               hash = hash_conntrack_raw(orig);
        }
 
        /* We don't want any race condition at early drop stage */
@@ -840,10 +840,9 @@ __nf_conntrack_alloc(struct net *net, u16 zone,
         * SLAB_DESTROY_BY_RCU.
         */
        ct = kmem_cache_alloc(net->ct.nf_conntrack_cachep, gfp);
-       if (ct == NULL) {
-               atomic_dec(&net->ct.count);
-               return ERR_PTR(-ENOMEM);
-       }
+       if (ct == NULL)
+               goto out;
+
        spin_lock_init(&ct->lock);
        ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple = *orig;
        ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode.pprev = NULL;
@@ -857,31 +856,24 @@ __nf_conntrack_alloc(struct net *net, u16 zone,
        memset(&ct->__nfct_init_offset[0], 0,
               offsetof(struct nf_conn, proto) -
               offsetof(struct nf_conn, __nfct_init_offset[0]));
-#ifdef CONFIG_NF_CONNTRACK_ZONES
-       if (zone) {
-               struct nf_conntrack_zone *nf_ct_zone;
 
-               nf_ct_zone = nf_ct_ext_add(ct, NF_CT_EXT_ZONE, GFP_ATOMIC);
-               if (!nf_ct_zone)
-                       goto out_free;
-               nf_ct_zone->id = zone;
-       }
-#endif
+       if (zone && nf_ct_zone_add(ct, GFP_ATOMIC, zone) < 0)
+               goto out_free;
+
        /* Because we use RCU lookups, we set ct_general.use to zero before
         * this is inserted in any list.
         */
        atomic_set(&ct->ct_general.use, 0);
        return ct;
-
-#ifdef CONFIG_NF_CONNTRACK_ZONES
 out_free:
-       atomic_dec(&net->ct.count);
        kmem_cache_free(net->ct.nf_conntrack_cachep, ct);
+out:
+       atomic_dec(&net->ct.count);
        return ERR_PTR(-ENOMEM);
-#endif
 }
 
-struct nf_conn *nf_conntrack_alloc(struct net *net, u16 zone,
+struct nf_conn *nf_conntrack_alloc(struct net *net,
+                                  const struct nf_conntrack_zone *zone,
                                   const struct nf_conntrack_tuple *orig,
                                   const struct nf_conntrack_tuple *repl,
                                   gfp_t gfp)
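
Worth noting in the __nf_conntrack_alloc() hunk above: both failure paths (kmem_cache_alloc() returning NULL and nf_ct_zone_add() failing) now converge on a single atomic_dec(&net->ct.count), so the early-drop accounting is unwound exactly once, and neither label needs to hide behind CONFIG_NF_CONNTRACK_ZONES anymore.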
@@ -923,8 +915,9 @@ init_conntrack(struct net *net, struct nf_conn *tmpl,
        struct nf_conntrack_tuple repl_tuple;
        struct nf_conntrack_ecache *ecache;
        struct nf_conntrack_expect *exp = NULL;
-       u16 zone = tmpl ? nf_ct_zone(tmpl) : NF_CT_DEFAULT_ZONE;
+       const struct nf_conntrack_zone *zone;
        struct nf_conn_timeout *timeout_ext;
+       struct nf_conntrack_zone tmp;
        unsigned int *timeouts;
 
        if (!nf_ct_invert_tuple(&repl_tuple, tuple, l3proto, l4proto)) {
@@ -932,6 +925,7 @@ init_conntrack(struct net *net, struct nf_conn *tmpl,
                return NULL;
        }
 
+       zone = nf_ct_zone_tmpl(tmpl, skb, &tmp);
        ct = __nf_conntrack_alloc(net, zone, tuple, &repl_tuple, GFP_ATOMIC,
                                  hash);
        if (IS_ERR(ct))
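
nf_ct_zone_tmpl() replaces the old `tmpl ? nf_ct_zone(tmpl) : NF_CT_DEFAULT_ZONE` expression. The extra skb and on-stack tmp arguments suggest a template can also derive the zone id from packet metadata; a sketch under that assumption (NF_CT_FLAG_MARK and nf_ct_zone_init() are inferred, not shown in this diff):

	static inline const struct nf_conntrack_zone *
	nf_ct_zone_tmpl(const struct nf_conn *tmpl, const struct sk_buff *skb,
			struct nf_conntrack_zone *tmp)
	{
		const struct nf_conntrack_zone *zone;

		if (!tmpl)
			return &nf_ct_zone_dflt;

		zone = nf_ct_zone(tmpl);
		if (zone->flags & NF_CT_FLAG_MARK) {
			/* Zone id is taken from the packet mark instead. */
			zone = nf_ct_zone_init(tmp, skb->mark, zone->dir, 0);
		}

		return zone;
	}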
@@ -1026,10 +1020,11 @@ resolve_normal_ct(struct net *net, struct nf_conn *tmpl,
                  int *set_reply,
                  enum ip_conntrack_info *ctinfo)
 {
+       const struct nf_conntrack_zone *zone;
        struct nf_conntrack_tuple tuple;
        struct nf_conntrack_tuple_hash *h;
+       struct nf_conntrack_zone tmp;
        struct nf_conn *ct;
-       u16 zone = tmpl ? nf_ct_zone(tmpl) : NF_CT_DEFAULT_ZONE;
        u32 hash;
 
        if (!nf_ct_get_tuple(skb, skb_network_offset(skb),
@@ -1040,7 +1035,8 @@ resolve_normal_ct(struct net *net, struct nf_conn *tmpl,
        }
 
        /* look for tuple match */
-       hash = hash_conntrack_raw(&tuple, zone);
+       zone = nf_ct_zone_tmpl(tmpl, skb, &tmp);
+       hash = hash_conntrack_raw(&tuple);
        h = __nf_conntrack_find_get(net, zone, &tuple, hash);
        if (!h) {
                h = init_conntrack(net, tmpl, &tuple, l3proto, l4proto,
@@ -1596,8 +1592,7 @@ int nf_conntrack_set_hashsize(const char *val, struct kernel_param *kp)
                                        struct nf_conntrack_tuple_hash, hnnode);
                        ct = nf_ct_tuplehash_to_ctrack(h);
                        hlist_nulls_del_rcu(&h->hnnode);
-                       bucket = __hash_conntrack(&h->tuple, nf_ct_zone(ct),
-                                                 hashsize);
+                       bucket = __hash_conntrack(&h->tuple, hashsize);
                        hlist_nulls_add_head_rcu(&h->hnnode, &hash[bucket]);
                }
        }
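
Consistent with the hashing change at the top of this diff, the rehash loop in nf_conntrack_set_hashsize() now buckets entries by tuple alone; the zone of a conntrack no longer influences where it lands in the resized table.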