/* Connection state tracking for netfilter.  This is separated from,
   but required by, the NAT layer; it can also be used by an iptables
   extension. */

/* (C) 1999-2001 Paul `Rusty' Russell
 * (C) 2002-2006 Netfilter Core Team <coreteam@netfilter.org>
 * (C) 2003,2004 USAGI/WIDE Project <http://www.linux-ipv6.org>
 * (C) 2005-2012 Patrick McHardy <kaber@trash.net>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/types.h>
#include <linux/netfilter.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/skbuff.h>
#include <linux/proc_fs.h>
#include <linux/vmalloc.h>
#include <linux/stddef.h>
#include <linux/slab.h>
#include <linux/random.h>
#include <linux/jhash.h>
#include <linux/err.h>
#include <linux/percpu.h>
#include <linux/moduleparam.h>
#include <linux/notifier.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/socket.h>
#include <linux/mm.h>
#include <linux/nsproxy.h>
#include <linux/rculist_nulls.h>

#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_l3proto.h>
#include <net/netfilter/nf_conntrack_l4proto.h>
#include <net/netfilter/nf_conntrack_expect.h>
#include <net/netfilter/nf_conntrack_helper.h>
#include <net/netfilter/nf_conntrack_seqadj.h>
#include <net/netfilter/nf_conntrack_core.h>
#include <net/netfilter/nf_conntrack_extend.h>
#include <net/netfilter/nf_conntrack_acct.h>
#include <net/netfilter/nf_conntrack_ecache.h>
#include <net/netfilter/nf_conntrack_zones.h>
#include <net/netfilter/nf_conntrack_timestamp.h>
#include <net/netfilter/nf_conntrack_timeout.h>
#include <net/netfilter/nf_conntrack_labels.h>
#include <net/netfilter/nf_conntrack_synproxy.h>
#include <net/netfilter/nf_nat.h>
#include <net/netfilter/nf_nat_core.h>
#include <net/netfilter/nf_nat_helper.h>

#define NF_CONNTRACK_VERSION    "0.5.0"

int (*nfnetlink_parse_nat_setup_hook)(struct nf_conn *ct,
                                      enum nf_nat_manip_type manip,
                                      const struct nlattr *attr) __read_mostly;
EXPORT_SYMBOL_GPL(nfnetlink_parse_nat_setup_hook);

__cacheline_aligned_in_smp spinlock_t nf_conntrack_locks[CONNTRACK_LOCKS];
EXPORT_SYMBOL_GPL(nf_conntrack_locks);

__cacheline_aligned_in_smp DEFINE_SPINLOCK(nf_conntrack_expect_lock);
EXPORT_SYMBOL_GPL(nf_conntrack_expect_lock);

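/* The conntrack hash table is protected by an array of CONNTRACK_LOCKS
 * spinlocks instead of a single global lock: bucket b is covered by
 * nf_conntrack_locks[b % CONNTRACK_LOCKS].  Operations that touch two
 * buckets (each conntrack is linked by both its original and its reply
 * tuple) take the two locks in index order to avoid ABBA deadlocks.
 */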
static void nf_conntrack_double_unlock(unsigned int h1, unsigned int h2)
{
        h1 %= CONNTRACK_LOCKS;
        h2 %= CONNTRACK_LOCKS;
        spin_unlock(&nf_conntrack_locks[h1]);
        if (h1 != h2)
                spin_unlock(&nf_conntrack_locks[h2]);
}

/* return true if we need to recompute hashes (in case hash table was resized) */
static bool nf_conntrack_double_lock(struct net *net, unsigned int h1,
                                     unsigned int h2, unsigned int sequence)
{
        h1 %= CONNTRACK_LOCKS;
        h2 %= CONNTRACK_LOCKS;
        if (h1 <= h2) {
                spin_lock(&nf_conntrack_locks[h1]);
                if (h1 != h2)
                        spin_lock_nested(&nf_conntrack_locks[h2],
                                         SINGLE_DEPTH_NESTING);
        } else {
                spin_lock(&nf_conntrack_locks[h2]);
                spin_lock_nested(&nf_conntrack_locks[h1],
                                 SINGLE_DEPTH_NESTING);
        }
        if (read_seqcount_retry(&net->ct.generation, sequence)) {
                nf_conntrack_double_unlock(h1, h2);
                return true;
        }
        return false;
}

static void nf_conntrack_all_lock(void)
{
        int i;

        for (i = 0; i < CONNTRACK_LOCKS; i++)
                spin_lock_nested(&nf_conntrack_locks[i], i);
}

static void nf_conntrack_all_unlock(void)
{
        int i;

        for (i = 0; i < CONNTRACK_LOCKS; i++)
                spin_unlock(&nf_conntrack_locks[i]);
}

unsigned int nf_conntrack_htable_size __read_mostly;
EXPORT_SYMBOL_GPL(nf_conntrack_htable_size);

unsigned int nf_conntrack_max __read_mostly;
EXPORT_SYMBOL_GPL(nf_conntrack_max);

DEFINE_PER_CPU(struct nf_conn, nf_conntrack_untracked);
EXPORT_PER_CPU_SYMBOL(nf_conntrack_untracked);

unsigned int nf_conntrack_hash_rnd __read_mostly;
EXPORT_SYMBOL_GPL(nf_conntrack_hash_rnd);

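/* Hash a tuple to a 32-bit value.  The hash is seeded with
 * nf_conntrack_hash_rnd, a random value drawn once per boot, so that
 * the placement of entries in the table cannot be predicted from the
 * packet headers alone.
 */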
static u32 hash_conntrack_raw(const struct nf_conntrack_tuple *tuple)
{
        unsigned int n;

        /* The direction must be ignored, so we hash everything up to the
         * destination ports (which is a multiple of 4) and treat the last
         * three bytes manually.
         */
        n = (sizeof(tuple->src) + sizeof(tuple->dst.u3)) / sizeof(u32);
        return jhash2((u32 *)tuple, n, nf_conntrack_hash_rnd ^
                      (((__force __u16)tuple->dst.u.all << 16) |
                      tuple->dst.protonum));
}

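/* reciprocal_scale() maps the 32-bit hash onto [0, size) using a
 * multiply and shift instead of a modulo, which is cheaper and works
 * for table sizes that are not powers of two.
 */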
static u32 __hash_bucket(u32 hash, unsigned int size)
{
        return reciprocal_scale(hash, size);
}

static u32 hash_bucket(u32 hash, const struct net *net)
{
        return __hash_bucket(hash, net->ct.htable_size);
}

static u_int32_t __hash_conntrack(const struct nf_conntrack_tuple *tuple,
                                  unsigned int size)
{
        return __hash_bucket(hash_conntrack_raw(tuple), size);
}

static inline u_int32_t hash_conntrack(const struct net *net,
                                       const struct nf_conntrack_tuple *tuple)
{
        return __hash_conntrack(tuple, net->ct.htable_size);
}

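/* Fill in *tuple from the packet headers: the l3proto callback parses
 * the network-layer addresses at @nhoff, the l4proto callback parses
 * the transport-layer keys (e.g. ports) at @dataoff.  Returns false
 * if either layer cannot be parsed.
 */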
bool
nf_ct_get_tuple(const struct sk_buff *skb,
                unsigned int nhoff,
                unsigned int dataoff,
                u_int16_t l3num,
                u_int8_t protonum,
                struct net *net,
                struct nf_conntrack_tuple *tuple,
                const struct nf_conntrack_l3proto *l3proto,
                const struct nf_conntrack_l4proto *l4proto)
{
        memset(tuple, 0, sizeof(*tuple));

        tuple->src.l3num = l3num;
        if (l3proto->pkt_to_tuple(skb, nhoff, tuple) == 0)
                return false;

        tuple->dst.protonum = protonum;
        tuple->dst.dir = IP_CT_DIR_ORIGINAL;

        return l4proto->pkt_to_tuple(skb, dataoff, net, tuple);
}
EXPORT_SYMBOL_GPL(nf_ct_get_tuple);

bool nf_ct_get_tuplepr(const struct sk_buff *skb, unsigned int nhoff,
                       u_int16_t l3num,
                       struct net *net, struct nf_conntrack_tuple *tuple)
{
        struct nf_conntrack_l3proto *l3proto;
        struct nf_conntrack_l4proto *l4proto;
        unsigned int protoff;
        u_int8_t protonum;
        int ret;

        rcu_read_lock();

        l3proto = __nf_ct_l3proto_find(l3num);
        ret = l3proto->get_l4proto(skb, nhoff, &protoff, &protonum);
        if (ret != NF_ACCEPT) {
                rcu_read_unlock();
                return false;
        }

        l4proto = __nf_ct_l4proto_find(l3num, protonum);

        ret = nf_ct_get_tuple(skb, nhoff, protoff, l3num, protonum, net, tuple,
                              l3proto, l4proto);

        rcu_read_unlock();
        return ret;
}
EXPORT_SYMBOL_GPL(nf_ct_get_tuplepr);

bool
nf_ct_invert_tuple(struct nf_conntrack_tuple *inverse,
                   const struct nf_conntrack_tuple *orig,
                   const struct nf_conntrack_l3proto *l3proto,
                   const struct nf_conntrack_l4proto *l4proto)
{
        memset(inverse, 0, sizeof(*inverse));

        inverse->src.l3num = orig->src.l3num;
        if (l3proto->invert_tuple(inverse, orig) == 0)
                return false;

        inverse->dst.dir = !orig->dst.dir;

        inverse->dst.protonum = orig->dst.protonum;
        return l4proto->invert_tuple(inverse, orig);
}
EXPORT_SYMBOL_GPL(nf_ct_invert_tuple);

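/* Unlink a conntrack from both hash chains and drop its expectations.
 * The caller must hold the bucket locks for both the original and the
 * reply direction (see nf_ct_delete_from_lists() below).
 */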
static void
clean_from_lists(struct nf_conn *ct)
{
        pr_debug("clean_from_lists(%p)\n", ct);
        hlist_nulls_del_rcu(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode);
        hlist_nulls_del_rcu(&ct->tuplehash[IP_CT_DIR_REPLY].hnnode);

        /* Destroy all pending expectations */
        nf_ct_remove_expectations(ct);
}

/* must be called with local_bh_disable */
static void nf_ct_add_to_dying_list(struct nf_conn *ct)
{
        struct ct_pcpu *pcpu;

        /* add this conntrack to the (per cpu) dying list */
        ct->cpu = smp_processor_id();
        pcpu = per_cpu_ptr(nf_ct_net(ct)->ct.pcpu_lists, ct->cpu);

        spin_lock(&pcpu->lock);
        hlist_nulls_add_head(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode,
                             &pcpu->dying);
        spin_unlock(&pcpu->lock);
}

/* must be called with local_bh_disable */
static void nf_ct_add_to_unconfirmed_list(struct nf_conn *ct)
{
        struct ct_pcpu *pcpu;

        /* add this conntrack to the (per cpu) unconfirmed list */
        ct->cpu = smp_processor_id();
        pcpu = per_cpu_ptr(nf_ct_net(ct)->ct.pcpu_lists, ct->cpu);

        spin_lock(&pcpu->lock);
        hlist_nulls_add_head(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode,
                             &pcpu->unconfirmed);
        spin_unlock(&pcpu->lock);
}

/* must be called with local_bh_disable */
static void nf_ct_del_from_dying_or_unconfirmed_list(struct nf_conn *ct)
{
        struct ct_pcpu *pcpu;

        /* We overload first tuple to link into unconfirmed or dying list. */
        pcpu = per_cpu_ptr(nf_ct_net(ct)->ct.pcpu_lists, ct->cpu);

        spin_lock(&pcpu->lock);
        BUG_ON(hlist_nulls_unhashed(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode));
        hlist_nulls_del_rcu(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode);
        spin_unlock(&pcpu->lock);
}

/* Released via destroy_conntrack() */
struct nf_conn *nf_ct_tmpl_alloc(struct net *net,
                                 const struct nf_conntrack_zone *zone,
                                 gfp_t flags)
{
        struct nf_conn *tmpl;

        tmpl = kzalloc(sizeof(*tmpl), flags);
        if (tmpl == NULL)
                return NULL;

        tmpl->status = IPS_TEMPLATE;
        write_pnet(&tmpl->ct_net, net);

        if (nf_ct_zone_add(tmpl, flags, zone) < 0)
                goto out_free;

        atomic_set(&tmpl->ct_general.use, 0);

        return tmpl;
out_free:
        kfree(tmpl);
        return NULL;
}
EXPORT_SYMBOL_GPL(nf_ct_tmpl_alloc);

void nf_ct_tmpl_free(struct nf_conn *tmpl)
{
        nf_ct_ext_destroy(tmpl);
        nf_ct_ext_free(tmpl);
        kfree(tmpl);
}
EXPORT_SYMBOL_GPL(nf_ct_tmpl_free);

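/* Final release of a conntrack, run once its reference count has
 * dropped to zero via the nf_ct_destroy hook (the hook is cleared
 * again in nf_conntrack_cleanup_end() below).  Templates never enter
 * the hash table or the protocol trackers, so they take the short
 * path via nf_ct_tmpl_free().
 */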
static void
destroy_conntrack(struct nf_conntrack *nfct)
{
        struct nf_conn *ct = (struct nf_conn *)nfct;
        struct net *net = nf_ct_net(ct);
        struct nf_conntrack_l4proto *l4proto;

        pr_debug("destroy_conntrack(%p)\n", ct);
        NF_CT_ASSERT(atomic_read(&nfct->use) == 0);
        NF_CT_ASSERT(!timer_pending(&ct->timeout));

        if (unlikely(nf_ct_is_template(ct))) {
                nf_ct_tmpl_free(ct);
                return;
        }
        rcu_read_lock();
        l4proto = __nf_ct_l4proto_find(nf_ct_l3num(ct), nf_ct_protonum(ct));
        if (l4proto && l4proto->destroy)
                l4proto->destroy(ct);

        rcu_read_unlock();

        local_bh_disable();
        /* Expectations will have been removed in clean_from_lists,
         * except TFTP can create an expectation on the first packet,
         * before connection is in the list, so we need to clean here,
         * too.
         */
        nf_ct_remove_expectations(ct);

        nf_ct_del_from_dying_or_unconfirmed_list(ct);

        NF_CT_STAT_INC(net, delete);
        local_bh_enable();

        if (ct->master)
                nf_ct_put(ct->master);

        pr_debug("destroy_conntrack: returning ct=%p to slab\n", ct);
        nf_conntrack_free(ct);
}

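/* Move a conntrack from the hash table to the dying list.  The bucket
 * hashes are computed under net->ct.generation and recomputed if a
 * table resize ran in between, so the locks taken always match the
 * table we unlink from.
 */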
static void nf_ct_delete_from_lists(struct nf_conn *ct)
{
        struct net *net = nf_ct_net(ct);
        unsigned int hash, reply_hash;
        unsigned int sequence;

        nf_ct_helper_destroy(ct);

        local_bh_disable();
        do {
                sequence = read_seqcount_begin(&net->ct.generation);
                hash = hash_conntrack(net,
                                      &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
                reply_hash = hash_conntrack(net,
                                           &ct->tuplehash[IP_CT_DIR_REPLY].tuple);
        } while (nf_conntrack_double_lock(net, hash, reply_hash, sequence));

        clean_from_lists(ct);
        nf_conntrack_double_unlock(hash, reply_hash);

        nf_ct_add_to_dying_list(ct);

        NF_CT_STAT_INC(net, delete_list);
        local_bh_enable();
}

bool nf_ct_delete(struct nf_conn *ct, u32 portid, int report)
{
        struct nf_conn_tstamp *tstamp;

        tstamp = nf_conn_tstamp_find(ct);
        if (tstamp && tstamp->stop == 0)
                tstamp->stop = ktime_get_real_ns();

        if (nf_ct_is_dying(ct))
                goto delete;

        if (nf_conntrack_event_report(IPCT_DESTROY, ct,
                                    portid, report) < 0) {
                /* destroy event was not delivered */
                nf_ct_delete_from_lists(ct);
                nf_conntrack_ecache_delayed_work(nf_ct_net(ct));
                return false;
        }

        nf_conntrack_ecache_work(nf_ct_net(ct));
        set_bit(IPS_DYING_BIT, &ct->status);
 delete:
        nf_ct_delete_from_lists(ct);
        nf_ct_put(ct);
        return true;
}
EXPORT_SYMBOL_GPL(nf_ct_delete);

static void death_by_timeout(unsigned long ul_conntrack)
{
        nf_ct_delete((struct nf_conn *)ul_conntrack, 0, 0);
}

static inline bool
nf_ct_key_equal(struct nf_conntrack_tuple_hash *h,
                const struct nf_conntrack_tuple *tuple,
                const struct nf_conntrack_zone *zone)
{
        struct nf_conn *ct = nf_ct_tuplehash_to_ctrack(h);

        /* A conntrack can be recreated with the equal tuple,
         * so we need to check that the conntrack is confirmed
         */
        return nf_ct_tuple_equal(tuple, &h->tuple) &&
               nf_ct_zone_equal(ct, zone, NF_CT_DIRECTION(h)) &&
               nf_ct_is_confirmed(ct);
}

/*
 * Warning:
 * - Caller must take a reference on returned object
 *   and recheck nf_ct_tuple_equal(tuple, &h->tuple)
 */
static struct nf_conntrack_tuple_hash *
____nf_conntrack_find(struct net *net, const struct nf_conntrack_zone *zone,
                      const struct nf_conntrack_tuple *tuple, u32 hash)
{
        struct nf_conntrack_tuple_hash *h;
        struct hlist_nulls_node *n;
        unsigned int bucket = hash_bucket(hash, net);

        /* Disable BHs the entire time since we normally need to disable them
         * at least once for the stats anyway.
         */
        local_bh_disable();
begin:
        hlist_nulls_for_each_entry_rcu(h, n, &net->ct.hash[bucket], hnnode) {
                if (nf_ct_key_equal(h, tuple, zone)) {
                        NF_CT_STAT_INC(net, found);
                        local_bh_enable();
                        return h;
                }
                NF_CT_STAT_INC(net, searched);
        }
        /*
         * if the nulls value we got at the end of this lookup is
         * not the expected one, we must restart lookup.
         * We probably met an item that was moved to another chain.
         */
        if (get_nulls_value(n) != bucket) {
                NF_CT_STAT_INC(net, search_restart);
                goto begin;
        }
        local_bh_enable();

        return NULL;
}

/* Find a connection corresponding to a tuple. */
static struct nf_conntrack_tuple_hash *
__nf_conntrack_find_get(struct net *net, const struct nf_conntrack_zone *zone,
                        const struct nf_conntrack_tuple *tuple, u32 hash)
{
        struct nf_conntrack_tuple_hash *h;
        struct nf_conn *ct;

        rcu_read_lock();
begin:
        h = ____nf_conntrack_find(net, zone, tuple, hash);
        if (h) {
                ct = nf_ct_tuplehash_to_ctrack(h);
                if (unlikely(nf_ct_is_dying(ct) ||
                             !atomic_inc_not_zero(&ct->ct_general.use)))
                        h = NULL;
                else {
                        if (unlikely(!nf_ct_key_equal(h, tuple, zone))) {
                                nf_ct_put(ct);
                                goto begin;
                        }
                }
        }
        rcu_read_unlock();

        return h;
}

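/* Look up a tuple and take a reference on the result.  A minimal
 * usage sketch (hypothetical caller, not a call site from this file):
 *
 *        h = nf_conntrack_find_get(net, zone, &tuple);
 *        if (h) {
 *                struct nf_conn *ct = nf_ct_tuplehash_to_ctrack(h);
 *                ... use ct ...
 *                nf_ct_put(ct);  (drops the reference taken here)
 *        }
 */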
struct nf_conntrack_tuple_hash *
nf_conntrack_find_get(struct net *net, const struct nf_conntrack_zone *zone,
                      const struct nf_conntrack_tuple *tuple)
{
        return __nf_conntrack_find_get(net, zone, tuple,
                                       hash_conntrack_raw(tuple));
}
EXPORT_SYMBOL_GPL(nf_conntrack_find_get);

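/* Link both directions of the conntrack into the hash table.  The
 * caller must hold the bucket locks for @hash and @reply_hash and
 * already own a reference on @ct.
 */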
static void __nf_conntrack_hash_insert(struct nf_conn *ct,
                                       unsigned int hash,
                                       unsigned int reply_hash)
{
        struct net *net = nf_ct_net(ct);

        hlist_nulls_add_head_rcu(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode,
                           &net->ct.hash[hash]);
        hlist_nulls_add_head_rcu(&ct->tuplehash[IP_CT_DIR_REPLY].hnnode,
                           &net->ct.hash[reply_hash]);
}

int
nf_conntrack_hash_check_insert(struct nf_conn *ct)
{
        const struct nf_conntrack_zone *zone;
        struct net *net = nf_ct_net(ct);
        unsigned int hash, reply_hash;
        struct nf_conntrack_tuple_hash *h;
        struct hlist_nulls_node *n;
        unsigned int sequence;

        zone = nf_ct_zone(ct);

        local_bh_disable();
        do {
                sequence = read_seqcount_begin(&net->ct.generation);
                hash = hash_conntrack(net,
                                      &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
                reply_hash = hash_conntrack(net,
                                           &ct->tuplehash[IP_CT_DIR_REPLY].tuple);
        } while (nf_conntrack_double_lock(net, hash, reply_hash, sequence));

        /* See if there's one in the list already, including reverse */
        hlist_nulls_for_each_entry(h, n, &net->ct.hash[hash], hnnode)
                if (nf_ct_tuple_equal(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple,
                                      &h->tuple) &&
                    nf_ct_zone_equal(nf_ct_tuplehash_to_ctrack(h), zone,
                                     NF_CT_DIRECTION(h)))
                        goto out;
        hlist_nulls_for_each_entry(h, n, &net->ct.hash[reply_hash], hnnode)
                if (nf_ct_tuple_equal(&ct->tuplehash[IP_CT_DIR_REPLY].tuple,
                                      &h->tuple) &&
                    nf_ct_zone_equal(nf_ct_tuplehash_to_ctrack(h), zone,
                                     NF_CT_DIRECTION(h)))
                        goto out;

        add_timer(&ct->timeout);
        smp_wmb();
        /* The caller holds a reference to this object */
        atomic_set(&ct->ct_general.use, 2);
        __nf_conntrack_hash_insert(ct, hash, reply_hash);
        nf_conntrack_double_unlock(hash, reply_hash);
        NF_CT_STAT_INC(net, insert);
        local_bh_enable();
        return 0;

out:
        nf_conntrack_double_unlock(hash, reply_hash);
        NF_CT_STAT_INC(net, insert_failed);
        local_bh_enable();
        return -EEXIST;
}
EXPORT_SYMBOL_GPL(nf_conntrack_hash_check_insert);

/* Confirm a connection given skb; places it in hash table */
int
__nf_conntrack_confirm(struct sk_buff *skb)
{
        const struct nf_conntrack_zone *zone;
        unsigned int hash, reply_hash;
        struct nf_conntrack_tuple_hash *h;
        struct nf_conn *ct;
        struct nf_conn_help *help;
        struct nf_conn_tstamp *tstamp;
        struct hlist_nulls_node *n;
        enum ip_conntrack_info ctinfo;
        struct net *net;
        unsigned int sequence;

        ct = nf_ct_get(skb, &ctinfo);
        net = nf_ct_net(ct);

        /* ipt_REJECT uses nf_conntrack_attach to attach related
           ICMP/TCP RST packets in other direction.  Actual packet
           which created connection will be IP_CT_NEW or for an
           expected connection, IP_CT_RELATED. */
        if (CTINFO2DIR(ctinfo) != IP_CT_DIR_ORIGINAL)
                return NF_ACCEPT;

        zone = nf_ct_zone(ct);
        local_bh_disable();

        do {
                sequence = read_seqcount_begin(&net->ct.generation);
                /* reuse the hash saved before */
                hash = *(unsigned long *)&ct->tuplehash[IP_CT_DIR_REPLY].hnnode.pprev;
                hash = hash_bucket(hash, net);
                reply_hash = hash_conntrack(net,
                                           &ct->tuplehash[IP_CT_DIR_REPLY].tuple);

        } while (nf_conntrack_double_lock(net, hash, reply_hash, sequence));

        /* We're not in hash table, and we refuse to set up related
         * connections for unconfirmed conns.  But packet copies and
         * REJECT will give spurious warnings here.
         */
        /* NF_CT_ASSERT(atomic_read(&ct->ct_general.use) == 1); */

        /* No external references means no one else could have
         * confirmed us.
         */
        NF_CT_ASSERT(!nf_ct_is_confirmed(ct));
        pr_debug("Confirming conntrack %p\n", ct);
        /* We have to check the DYING flag after unlink to prevent
         * a race against nf_ct_get_next_corpse() possibly called from
         * user context, else we insert an already 'dead' hash, blocking
         * further use of that particular connection -JM.
         */
        nf_ct_del_from_dying_or_unconfirmed_list(ct);

        if (unlikely(nf_ct_is_dying(ct)))
                goto out;

        /* See if there's one in the list already, including reverse:
           NAT could have grabbed it without realizing, since we're
           not in the hash.  If there is, we lost race. */
        hlist_nulls_for_each_entry(h, n, &net->ct.hash[hash], hnnode)
                if (nf_ct_tuple_equal(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple,
                                      &h->tuple) &&
                    nf_ct_zone_equal(nf_ct_tuplehash_to_ctrack(h), zone,
                                     NF_CT_DIRECTION(h)))
                        goto out;
        hlist_nulls_for_each_entry(h, n, &net->ct.hash[reply_hash], hnnode)
                if (nf_ct_tuple_equal(&ct->tuplehash[IP_CT_DIR_REPLY].tuple,
                                      &h->tuple) &&
                    nf_ct_zone_equal(nf_ct_tuplehash_to_ctrack(h), zone,
                                     NF_CT_DIRECTION(h)))
                        goto out;

        /* Timer relative to confirmation time, not original
           setting time, otherwise we'd get timer wrap in
           weird delay cases. */
        ct->timeout.expires += jiffies;
        add_timer(&ct->timeout);
        atomic_inc(&ct->ct_general.use);
        ct->status |= IPS_CONFIRMED;

        /* set conntrack timestamp, if enabled. */
        tstamp = nf_conn_tstamp_find(ct);
        if (tstamp) {
                if (skb->tstamp.tv64 == 0)
                        __net_timestamp(skb);

                tstamp->start = ktime_to_ns(skb->tstamp);
        }
        /* Since the lookup is lockless, hash insertion must be done after
         * starting the timer and setting the CONFIRMED bit. The RCU barriers
         * guarantee that no other CPU can find the conntrack before the above
         * stores are visible.
         */
        __nf_conntrack_hash_insert(ct, hash, reply_hash);
        nf_conntrack_double_unlock(hash, reply_hash);
        NF_CT_STAT_INC(net, insert);
        local_bh_enable();

        help = nfct_help(ct);
        if (help && help->helper)
                nf_conntrack_event_cache(IPCT_HELPER, ct);

        nf_conntrack_event_cache(master_ct(ct) ?
                                 IPCT_RELATED : IPCT_NEW, ct);
        return NF_ACCEPT;

out:
        nf_ct_add_to_dying_list(ct);
        nf_conntrack_double_unlock(hash, reply_hash);
        NF_CT_STAT_INC(net, insert_failed);
        local_bh_enable();
        return NF_DROP;
}
EXPORT_SYMBOL_GPL(__nf_conntrack_confirm);

/* Returns true if a connection corresponds to the tuple (required
   for NAT). */
int
nf_conntrack_tuple_taken(const struct nf_conntrack_tuple *tuple,
                         const struct nf_conn *ignored_conntrack)
{
        struct net *net = nf_ct_net(ignored_conntrack);
        const struct nf_conntrack_zone *zone;
        struct nf_conntrack_tuple_hash *h;
        struct hlist_nulls_node *n;
        struct nf_conn *ct;
        unsigned int hash;

        zone = nf_ct_zone(ignored_conntrack);
        hash = hash_conntrack(net, tuple);

        /* Disable BHs the entire time since we need to disable them at
         * least once for the stats anyway.
         */
        rcu_read_lock_bh();
        hlist_nulls_for_each_entry_rcu(h, n, &net->ct.hash[hash], hnnode) {
                ct = nf_ct_tuplehash_to_ctrack(h);
                if (ct != ignored_conntrack &&
                    nf_ct_tuple_equal(tuple, &h->tuple) &&
                    nf_ct_zone_equal(ct, zone, NF_CT_DIRECTION(h))) {
                        NF_CT_STAT_INC(net, found);
                        rcu_read_unlock_bh();
                        return 1;
                }
                NF_CT_STAT_INC(net, searched);
        }
        rcu_read_unlock_bh();

        return 0;
}
EXPORT_SYMBOL_GPL(nf_conntrack_tuple_taken);

#define NF_CT_EVICTION_RANGE    8

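/* When the table is full, try to make room by evicting an entry.
 * Starting at the bucket the new conntrack hashes to, early_drop()
 * scans up to NF_CT_EVICTION_RANGE entries and drops the first one
 * that is neither assured nor already dying.
 */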
/* There's a small race here where we may free a just-assured
   connection.  Too bad: we're in trouble anyway. */
static noinline int early_drop(struct net *net, unsigned int _hash)
{
        /* Use oldest entry, which is roughly LRU */
        struct nf_conntrack_tuple_hash *h;
        struct nf_conn *ct = NULL, *tmp;
        struct hlist_nulls_node *n;
        unsigned int i = 0, cnt = 0;
        int dropped = 0;
        unsigned int hash, sequence;
        spinlock_t *lockp;

        local_bh_disable();
restart:
        sequence = read_seqcount_begin(&net->ct.generation);
        hash = hash_bucket(_hash, net);
        for (; i < net->ct.htable_size; i++) {
                lockp = &nf_conntrack_locks[hash % CONNTRACK_LOCKS];
                spin_lock(lockp);
                if (read_seqcount_retry(&net->ct.generation, sequence)) {
                        spin_unlock(lockp);
                        goto restart;
                }
                hlist_nulls_for_each_entry_rcu(h, n, &net->ct.hash[hash],
                                         hnnode) {
                        tmp = nf_ct_tuplehash_to_ctrack(h);
                        if (!test_bit(IPS_ASSURED_BIT, &tmp->status) &&
                            !nf_ct_is_dying(tmp) &&
                            atomic_inc_not_zero(&tmp->ct_general.use)) {
                                ct = tmp;
                                break;
                        }
                        cnt++;
                }

                hash = (hash + 1) % net->ct.htable_size;
                spin_unlock(lockp);

                if (ct || cnt >= NF_CT_EVICTION_RANGE)
                        break;

        }
        local_bh_enable();

        if (!ct)
                return dropped;

        if (del_timer(&ct->timeout)) {
                if (nf_ct_delete(ct, 0, 0)) {
                        dropped = 1;
                        NF_CT_STAT_INC_ATOMIC(net, early_drop);
                }
        }
        nf_ct_put(ct);
        return dropped;
}

void init_nf_conntrack_hash_rnd(void)
{
        unsigned int rand;

        /*
         * Why isn't nf_conntrack_hash_rnd initialized in an init function?
         * Because there isn't enough entropy available that early in boot,
         * so we defer the initialization until the value is first needed.
         */
        do {
                get_random_bytes(&rand, sizeof(rand));
        } while (!rand);
        cmpxchg(&nf_conntrack_hash_rnd, 0, rand);
}

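/* Allocate a conntrack from the SLAB_DESTROY_BY_RCU cache.  If the
 * table would exceed nf_conntrack_max, early_drop() must make room
 * first, otherwise the allocation fails with -ENOMEM.  The raw tuple
 * hash is stashed in the (otherwise unused) reply pprev field so that
 * __nf_conntrack_confirm() can reuse it instead of rehashing.
 */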
static struct nf_conn *
__nf_conntrack_alloc(struct net *net,
                     const struct nf_conntrack_zone *zone,
                     const struct nf_conntrack_tuple *orig,
                     const struct nf_conntrack_tuple *repl,
                     gfp_t gfp, u32 hash)
{
        struct nf_conn *ct;

        if (unlikely(!nf_conntrack_hash_rnd)) {
                init_nf_conntrack_hash_rnd();
                /* recompute the hash as nf_conntrack_hash_rnd is initialized */
                hash = hash_conntrack_raw(orig);
        }

        /* We don't want any race condition at early drop stage */
        atomic_inc(&net->ct.count);

        if (nf_conntrack_max &&
            unlikely(atomic_read(&net->ct.count) > nf_conntrack_max)) {
                if (!early_drop(net, hash)) {
                        atomic_dec(&net->ct.count);
                        net_warn_ratelimited("nf_conntrack: table full, dropping packet\n");
                        return ERR_PTR(-ENOMEM);
                }
        }

        /*
         * Do not use kmem_cache_zalloc(), as this cache uses
         * SLAB_DESTROY_BY_RCU.
         */
        ct = kmem_cache_alloc(net->ct.nf_conntrack_cachep, gfp);
        if (ct == NULL)
                goto out;

        spin_lock_init(&ct->lock);
        ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple = *orig;
        ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode.pprev = NULL;
        ct->tuplehash[IP_CT_DIR_REPLY].tuple = *repl;
        /* save hash for reusing when confirming */
        *(unsigned long *)(&ct->tuplehash[IP_CT_DIR_REPLY].hnnode.pprev) = hash;
        ct->status = 0;
        /* Don't set timer yet: wait for confirmation */
        setup_timer(&ct->timeout, death_by_timeout, (unsigned long)ct);
        write_pnet(&ct->ct_net, net);
        memset(&ct->__nfct_init_offset[0], 0,
               offsetof(struct nf_conn, proto) -
               offsetof(struct nf_conn, __nfct_init_offset[0]));

        if (zone && nf_ct_zone_add(ct, GFP_ATOMIC, zone) < 0)
                goto out_free;

        /* Because we use RCU lookups, we set ct_general.use to zero before
         * this is inserted in any list.
         */
        atomic_set(&ct->ct_general.use, 0);
        return ct;
out_free:
        kmem_cache_free(net->ct.nf_conntrack_cachep, ct);
out:
        atomic_dec(&net->ct.count);
        return ERR_PTR(-ENOMEM);
}

struct nf_conn *nf_conntrack_alloc(struct net *net,
                                   const struct nf_conntrack_zone *zone,
                                   const struct nf_conntrack_tuple *orig,
                                   const struct nf_conntrack_tuple *repl,
                                   gfp_t gfp)
{
        return __nf_conntrack_alloc(net, zone, orig, repl, gfp, 0);
}
EXPORT_SYMBOL_GPL(nf_conntrack_alloc);

void nf_conntrack_free(struct nf_conn *ct)
{
        struct net *net = nf_ct_net(ct);

        /* A freed object has refcnt == 0, that's
         * the golden rule for SLAB_DESTROY_BY_RCU
         */
        NF_CT_ASSERT(atomic_read(&ct->ct_general.use) == 0);

        nf_ct_ext_destroy(ct);
        nf_ct_ext_free(ct);
        kmem_cache_free(net->ct.nf_conntrack_cachep, ct);
        smp_mb__before_atomic();
        atomic_dec(&net->ct.count);
}
EXPORT_SYMBOL_GPL(nf_conntrack_free);


/* Allocate a new conntrack: we return -ENOMEM if classification
   failed due to stress.  Otherwise it really is unclassifiable. */
static struct nf_conntrack_tuple_hash *
init_conntrack(struct net *net, struct nf_conn *tmpl,
               const struct nf_conntrack_tuple *tuple,
               struct nf_conntrack_l3proto *l3proto,
               struct nf_conntrack_l4proto *l4proto,
               struct sk_buff *skb,
               unsigned int dataoff, u32 hash)
{
        struct nf_conn *ct;
        struct nf_conn_help *help;
        struct nf_conntrack_tuple repl_tuple;
        struct nf_conntrack_ecache *ecache;
        struct nf_conntrack_expect *exp = NULL;
        const struct nf_conntrack_zone *zone;
        struct nf_conn_timeout *timeout_ext;
        struct nf_conntrack_zone tmp;
        unsigned int *timeouts;

        if (!nf_ct_invert_tuple(&repl_tuple, tuple, l3proto, l4proto)) {
                pr_debug("Can't invert tuple.\n");
                return NULL;
        }

        zone = nf_ct_zone_tmpl(tmpl, skb, &tmp);
        ct = __nf_conntrack_alloc(net, zone, tuple, &repl_tuple, GFP_ATOMIC,
                                  hash);
        if (IS_ERR(ct))
                return (struct nf_conntrack_tuple_hash *)ct;

        if (tmpl && nfct_synproxy(tmpl)) {
                nfct_seqadj_ext_add(ct);
                nfct_synproxy_ext_add(ct);
        }

        timeout_ext = tmpl ? nf_ct_timeout_find(tmpl) : NULL;
        if (timeout_ext)
                timeouts = NF_CT_TIMEOUT_EXT_DATA(timeout_ext);
        else
                timeouts = l4proto->get_timeouts(net);

        if (!l4proto->new(ct, skb, dataoff, timeouts)) {
                nf_conntrack_free(ct);
                pr_debug("init conntrack: can't track with proto module\n");
                return NULL;
        }

        if (timeout_ext)
                nf_ct_timeout_ext_add(ct, timeout_ext->timeout, GFP_ATOMIC);

        nf_ct_acct_ext_add(ct, GFP_ATOMIC);
        nf_ct_tstamp_ext_add(ct, GFP_ATOMIC);
        nf_ct_labels_ext_add(ct);

        ecache = tmpl ? nf_ct_ecache_find(tmpl) : NULL;
        nf_ct_ecache_ext_add(ct, ecache ? ecache->ctmask : 0,
                                 ecache ? ecache->expmask : 0,
                             GFP_ATOMIC);

        local_bh_disable();
        if (net->ct.expect_count) {
                spin_lock(&nf_conntrack_expect_lock);
                exp = nf_ct_find_expectation(net, zone, tuple);
                if (exp) {
                        pr_debug("conntrack: expectation arrives ct=%p exp=%p\n",
                                 ct, exp);
                        /* Welcome, Mr. Bond.  We've been expecting you... */
                        __set_bit(IPS_EXPECTED_BIT, &ct->status);
                        /* exp->master safe, refcnt bumped in nf_ct_find_expectation */
                        ct->master = exp->master;
                        if (exp->helper) {
                                help = nf_ct_helper_ext_add(ct, exp->helper,
                                                            GFP_ATOMIC);
                                if (help)
                                        rcu_assign_pointer(help->helper, exp->helper);
                        }

#ifdef CONFIG_NF_CONNTRACK_MARK
                        ct->mark = exp->master->mark;
#endif
#ifdef CONFIG_NF_CONNTRACK_SECMARK
                        ct->secmark = exp->master->secmark;
#endif
                        NF_CT_STAT_INC(net, expect_new);
                }
                spin_unlock(&nf_conntrack_expect_lock);
        }
        if (!exp) {
                __nf_ct_try_assign_helper(ct, tmpl, GFP_ATOMIC);
                NF_CT_STAT_INC(net, new);
        }

        /* Now it is inserted into the unconfirmed list, bump refcount */
        nf_conntrack_get(&ct->ct_general);
        nf_ct_add_to_unconfirmed_list(ct);

        local_bh_enable();

        if (exp) {
                if (exp->expectfn)
                        exp->expectfn(ct, exp);
                nf_ct_expect_put(exp);
        }

        return &ct->tuplehash[IP_CT_DIR_ORIGINAL];
}

/* On success, returns conntrack ptr, sets skb->nfct and ctinfo */
static inline struct nf_conn *
resolve_normal_ct(struct net *net, struct nf_conn *tmpl,
                  struct sk_buff *skb,
                  unsigned int dataoff,
                  u_int16_t l3num,
                  u_int8_t protonum,
                  struct nf_conntrack_l3proto *l3proto,
                  struct nf_conntrack_l4proto *l4proto,
                  int *set_reply,
                  enum ip_conntrack_info *ctinfo)
{
        const struct nf_conntrack_zone *zone;
        struct nf_conntrack_tuple tuple;
        struct nf_conntrack_tuple_hash *h;
        struct nf_conntrack_zone tmp;
        struct nf_conn *ct;
        u32 hash;

        if (!nf_ct_get_tuple(skb, skb_network_offset(skb),
                             dataoff, l3num, protonum, net, &tuple, l3proto,
                             l4proto)) {
                pr_debug("resolve_normal_ct: Can't get tuple\n");
                return NULL;
        }

        /* look for tuple match */
        zone = nf_ct_zone_tmpl(tmpl, skb, &tmp);
        hash = hash_conntrack_raw(&tuple);
        h = __nf_conntrack_find_get(net, zone, &tuple, hash);
        if (!h) {
                h = init_conntrack(net, tmpl, &tuple, l3proto, l4proto,
                                   skb, dataoff, hash);
                if (!h)
                        return NULL;
                if (IS_ERR(h))
                        return (void *)h;
        }
        ct = nf_ct_tuplehash_to_ctrack(h);

        /* It exists; we have (non-exclusive) reference. */
        if (NF_CT_DIRECTION(h) == IP_CT_DIR_REPLY) {
                *ctinfo = IP_CT_ESTABLISHED_REPLY;
                /* Please set reply bit if this packet OK */
                *set_reply = 1;
        } else {
                /* Once we've had two way comms, always ESTABLISHED. */
                if (test_bit(IPS_SEEN_REPLY_BIT, &ct->status)) {
                        pr_debug("nf_conntrack_in: normal packet for %p\n", ct);
                        *ctinfo = IP_CT_ESTABLISHED;
                } else if (test_bit(IPS_EXPECTED_BIT, &ct->status)) {
                        pr_debug("nf_conntrack_in: related packet for %p\n",
                                 ct);
                        *ctinfo = IP_CT_RELATED;
                } else {
                        pr_debug("nf_conntrack_in: new packet for %p\n", ct);
                        *ctinfo = IP_CT_NEW;
                }
                *set_reply = 0;
        }
        skb->nfct = &ct->ct_general;
        skb->nfctinfo = *ctinfo;
        return ct;
}

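/* Main entry point of the connection tracker, called from the
 * per-family hook functions for every packet.  It resolves (or
 * creates) the conntrack entry for the packet, runs the per-protocol
 * state machine via l4proto->packet() and records the result in
 * skb->nfct and skb->nfctinfo.
 */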
unsigned int
nf_conntrack_in(struct net *net, u_int8_t pf, unsigned int hooknum,
                struct sk_buff *skb)
{
        struct nf_conn *ct, *tmpl = NULL;
        enum ip_conntrack_info ctinfo;
        struct nf_conntrack_l3proto *l3proto;
        struct nf_conntrack_l4proto *l4proto;
        unsigned int *timeouts;
        unsigned int dataoff;
        u_int8_t protonum;
        int set_reply = 0;
        int ret;

        if (skb->nfct) {
                /* Previously seen (loopback or untracked)?  Ignore. */
                tmpl = (struct nf_conn *)skb->nfct;
                if (!nf_ct_is_template(tmpl)) {
                        NF_CT_STAT_INC_ATOMIC(net, ignore);
                        return NF_ACCEPT;
                }
                skb->nfct = NULL;
        }

        /* rcu_read_lock()ed by nf_hook_slow */
        l3proto = __nf_ct_l3proto_find(pf);
        ret = l3proto->get_l4proto(skb, skb_network_offset(skb),
                                   &dataoff, &protonum);
        if (ret <= 0) {
                pr_debug("not prepared to track yet or error occurred\n");
                NF_CT_STAT_INC_ATOMIC(net, error);
                NF_CT_STAT_INC_ATOMIC(net, invalid);
                ret = -ret;
                goto out;
        }

        l4proto = __nf_ct_l4proto_find(pf, protonum);

        /* It may be a special packet: error, unclean...  The
         * inverse of the return code tells the netfilter
         * core what to do with the packet. */
        if (l4proto->error != NULL) {
                ret = l4proto->error(net, tmpl, skb, dataoff, &ctinfo,
                                     pf, hooknum);
                if (ret <= 0) {
                        NF_CT_STAT_INC_ATOMIC(net, error);
                        NF_CT_STAT_INC_ATOMIC(net, invalid);
                        ret = -ret;
                        goto out;
                }
                /* ICMP[v6] protocol trackers may assign one conntrack. */
                if (skb->nfct)
                        goto out;
        }

        ct = resolve_normal_ct(net, tmpl, skb, dataoff, pf, protonum,
                               l3proto, l4proto, &set_reply, &ctinfo);
        if (!ct) {
                /* Not valid part of a connection */
                NF_CT_STAT_INC_ATOMIC(net, invalid);
                ret = NF_ACCEPT;
                goto out;
        }

        if (IS_ERR(ct)) {
                /* Too stressed to deal. */
                NF_CT_STAT_INC_ATOMIC(net, drop);
                ret = NF_DROP;
                goto out;
        }

        NF_CT_ASSERT(skb->nfct);

        /* Decide what timeout policy we want to apply to this flow. */
        timeouts = nf_ct_timeout_lookup(net, ct, l4proto);

        ret = l4proto->packet(ct, skb, dataoff, ctinfo, pf, hooknum, timeouts);
        if (ret <= 0) {
                /* Invalid: inverse of the return code tells
                 * the netfilter core what to do */
                pr_debug("nf_conntrack_in: Can't track with proto module\n");
                nf_conntrack_put(skb->nfct);
                skb->nfct = NULL;
                NF_CT_STAT_INC_ATOMIC(net, invalid);
                if (ret == -NF_DROP)
                        NF_CT_STAT_INC_ATOMIC(net, drop);
                ret = -ret;
                goto out;
        }

        if (set_reply && !test_and_set_bit(IPS_SEEN_REPLY_BIT, &ct->status))
                nf_conntrack_event_cache(IPCT_REPLY, ct);
out:
        if (tmpl) {
                /* Special case: we have to repeat this hook, assign the
                 * template again to this packet. We assume that this packet
                 * has no conntrack assigned. This is used by nf_ct_tcp. */
                if (ret == NF_REPEAT)
                        skb->nfct = (struct nf_conntrack *)tmpl;
                else
                        nf_ct_put(tmpl);
        }

        return ret;
}
EXPORT_SYMBOL_GPL(nf_conntrack_in);

bool nf_ct_invert_tuplepr(struct nf_conntrack_tuple *inverse,
                          const struct nf_conntrack_tuple *orig)
{
        bool ret;

        rcu_read_lock();
        ret = nf_ct_invert_tuple(inverse, orig,
                                 __nf_ct_l3proto_find(orig->src.l3num),
                                 __nf_ct_l4proto_find(orig->src.l3num,
                                                      orig->dst.protonum));
        rcu_read_unlock();
        return ret;
}
EXPORT_SYMBOL_GPL(nf_ct_invert_tuplepr);

/* Alter reply tuple (maybe alter helper).  This is for NAT, and is
   implicitly racy: see __nf_conntrack_confirm */
void nf_conntrack_alter_reply(struct nf_conn *ct,
                              const struct nf_conntrack_tuple *newreply)
{
        struct nf_conn_help *help = nfct_help(ct);

        /* Should be unconfirmed, so not in hash table yet */
        NF_CT_ASSERT(!nf_ct_is_confirmed(ct));

        pr_debug("Altering reply tuple of %p to ", ct);
        nf_ct_dump_tuple(newreply);

        ct->tuplehash[IP_CT_DIR_REPLY].tuple = *newreply;
        if (ct->master || (help && !hlist_empty(&help->expectations)))
                return;

        rcu_read_lock();
        __nf_ct_try_assign_helper(ct, NULL, GFP_ATOMIC);
        rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(nf_conntrack_alter_reply);

/* Refresh conntrack for this many jiffies and do accounting if do_acct is 1 */
void __nf_ct_refresh_acct(struct nf_conn *ct,
                          enum ip_conntrack_info ctinfo,
                          const struct sk_buff *skb,
                          unsigned long extra_jiffies,
                          int do_acct)
{
        NF_CT_ASSERT(ct->timeout.data == (unsigned long)ct);
        NF_CT_ASSERT(skb);

        /* Only update if this is not a fixed timeout */
        if (test_bit(IPS_FIXED_TIMEOUT_BIT, &ct->status))
                goto acct;

        /* If not in hash table, timer will not be active yet */
        if (!nf_ct_is_confirmed(ct)) {
                ct->timeout.expires = extra_jiffies;
        } else {
                unsigned long newtime = jiffies + extra_jiffies;

                /* Only update the timeout if the new timeout is at least
                   HZ jiffies from the old timeout. Need del_timer for race
                   avoidance (may already be dying). */
                if (newtime - ct->timeout.expires >= HZ)
                        mod_timer_pending(&ct->timeout, newtime);
        }

acct:
        if (do_acct) {
                struct nf_conn_acct *acct;

                acct = nf_conn_acct_find(ct);
                if (acct) {
                        struct nf_conn_counter *counter = acct->counter;

                        atomic64_inc(&counter[CTINFO2DIR(ctinfo)].packets);
                        atomic64_add(skb->len, &counter[CTINFO2DIR(ctinfo)].bytes);
                }
        }
}
EXPORT_SYMBOL_GPL(__nf_ct_refresh_acct);

bool __nf_ct_kill_acct(struct nf_conn *ct,
                       enum ip_conntrack_info ctinfo,
                       const struct sk_buff *skb,
                       int do_acct)
{
        if (do_acct) {
                struct nf_conn_acct *acct;

                acct = nf_conn_acct_find(ct);
                if (acct) {
                        struct nf_conn_counter *counter = acct->counter;

                        atomic64_inc(&counter[CTINFO2DIR(ctinfo)].packets);
                        atomic64_add(skb->len - skb_network_offset(skb),
                                     &counter[CTINFO2DIR(ctinfo)].bytes);
                }
        }

        if (del_timer(&ct->timeout)) {
                ct->timeout.function((unsigned long)ct);
                return true;
        }
        return false;
}
EXPORT_SYMBOL_GPL(__nf_ct_kill_acct);

#ifdef CONFIG_NF_CONNTRACK_ZONES
static struct nf_ct_ext_type nf_ct_zone_extend __read_mostly = {
        .len    = sizeof(struct nf_conntrack_zone),
        .align  = __alignof__(struct nf_conntrack_zone),
        .id     = NF_CT_EXT_ZONE,
};
#endif

#if IS_ENABLED(CONFIG_NF_CT_NETLINK)

#include <linux/netfilter/nfnetlink.h>
#include <linux/netfilter/nfnetlink_conntrack.h>
#include <linux/mutex.h>

/* Generic function for tcp/udp/sctp/dccp and alike. This needs to be
 * in ip_conntrack_core, since we don't want the protocols to autoload
 * or depend on ctnetlink */
int nf_ct_port_tuple_to_nlattr(struct sk_buff *skb,
                               const struct nf_conntrack_tuple *tuple)
{
        if (nla_put_be16(skb, CTA_PROTO_SRC_PORT, tuple->src.u.tcp.port) ||
            nla_put_be16(skb, CTA_PROTO_DST_PORT, tuple->dst.u.tcp.port))
                goto nla_put_failure;
        return 0;

nla_put_failure:
        return -1;
}
EXPORT_SYMBOL_GPL(nf_ct_port_tuple_to_nlattr);

const struct nla_policy nf_ct_port_nla_policy[CTA_PROTO_MAX+1] = {
        [CTA_PROTO_SRC_PORT]  = { .type = NLA_U16 },
        [CTA_PROTO_DST_PORT]  = { .type = NLA_U16 },
};
EXPORT_SYMBOL_GPL(nf_ct_port_nla_policy);

int nf_ct_port_nlattr_to_tuple(struct nlattr *tb[],
                               struct nf_conntrack_tuple *t)
{
        if (!tb[CTA_PROTO_SRC_PORT] || !tb[CTA_PROTO_DST_PORT])
                return -EINVAL;

        t->src.u.tcp.port = nla_get_be16(tb[CTA_PROTO_SRC_PORT]);
        t->dst.u.tcp.port = nla_get_be16(tb[CTA_PROTO_DST_PORT]);

        return 0;
}
EXPORT_SYMBOL_GPL(nf_ct_port_nlattr_to_tuple);

int nf_ct_port_nlattr_tuple_size(void)
{
        return nla_policy_len(nf_ct_port_nla_policy, CTA_PROTO_MAX + 1);
}
EXPORT_SYMBOL_GPL(nf_ct_port_nlattr_tuple_size);
#endif

/* Used by ipt_REJECT and ip6t_REJECT. */
static void nf_conntrack_attach(struct sk_buff *nskb, const struct sk_buff *skb)
{
        struct nf_conn *ct;
        enum ip_conntrack_info ctinfo;

        /* This ICMP is in reverse direction to the packet which caused it */
        ct = nf_ct_get(skb, &ctinfo);
        if (CTINFO2DIR(ctinfo) == IP_CT_DIR_ORIGINAL)
                ctinfo = IP_CT_RELATED_REPLY;
        else
                ctinfo = IP_CT_RELATED;

        /* Attach to new skbuff, and increment count */
        nskb->nfct = &ct->ct_general;
        nskb->nfctinfo = ctinfo;
        nf_conntrack_get(nskb->nfct);
}

/* Bring out ya dead! */
static struct nf_conn *
get_next_corpse(struct net *net, int (*iter)(struct nf_conn *i, void *data),
                void *data, unsigned int *bucket)
{
        struct nf_conntrack_tuple_hash *h;
        struct nf_conn *ct;
        struct hlist_nulls_node *n;
        int cpu;
        spinlock_t *lockp;

        for (; *bucket < net->ct.htable_size; (*bucket)++) {
                lockp = &nf_conntrack_locks[*bucket % CONNTRACK_LOCKS];
                local_bh_disable();
                spin_lock(lockp);
                if (*bucket < net->ct.htable_size) {
                        hlist_nulls_for_each_entry(h, n, &net->ct.hash[*bucket], hnnode) {
                                if (NF_CT_DIRECTION(h) != IP_CT_DIR_ORIGINAL)
                                        continue;
                                ct = nf_ct_tuplehash_to_ctrack(h);
                                if (iter(ct, data))
                                        goto found;
                        }
                }
                spin_unlock(lockp);
                local_bh_enable();
        }

        for_each_possible_cpu(cpu) {
                struct ct_pcpu *pcpu = per_cpu_ptr(net->ct.pcpu_lists, cpu);

                spin_lock_bh(&pcpu->lock);
                hlist_nulls_for_each_entry(h, n, &pcpu->unconfirmed, hnnode) {
                        ct = nf_ct_tuplehash_to_ctrack(h);
                        if (iter(ct, data))
                                set_bit(IPS_DYING_BIT, &ct->status);
                }
                spin_unlock_bh(&pcpu->lock);
        }
        return NULL;
found:
        atomic_inc(&ct->ct_general.use);
        spin_unlock(lockp);
        local_bh_enable();
        return ct;
}

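/* Walk every conntrack in the namespace and delete those for which
 * @iter returns true.  Unconfirmed entries cannot be unlinked here,
 * so they are only flagged IPS_DYING and are reaped on their normal
 * release path.
 */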
void nf_ct_iterate_cleanup(struct net *net,
                           int (*iter)(struct nf_conn *i, void *data),
                           void *data, u32 portid, int report)
{
        struct nf_conn *ct;
        unsigned int bucket = 0;

        while ((ct = get_next_corpse(net, iter, data, &bucket)) != NULL) {
                /* Time to push up daisies... */
                if (del_timer(&ct->timeout))
                        nf_ct_delete(ct, portid, report);

                /* ... else the timer will get him soon. */

                nf_ct_put(ct);
        }
}
EXPORT_SYMBOL_GPL(nf_ct_iterate_cleanup);

static int kill_all(struct nf_conn *i, void *data)
{
        return 1;
}

void nf_ct_free_hashtable(void *hash, unsigned int size)
{
        if (is_vmalloc_addr(hash))
                vfree(hash);
        else
                free_pages((unsigned long)hash,
                           get_order(sizeof(struct hlist_head) * size));
}
EXPORT_SYMBOL_GPL(nf_ct_free_hashtable);

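/* Count the references still held on the per-cpu untracked conntracks
 * beyond the initial one each of them keeps on itself; cleanup waits
 * until this drops to zero, i.e. until no skb points at them anymore.
 */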
static int untrack_refs(void)
{
        int cnt = 0, cpu;

        for_each_possible_cpu(cpu) {
                struct nf_conn *ct = &per_cpu(nf_conntrack_untracked, cpu);

                cnt += atomic_read(&ct->ct_general.use) - 1;
        }
        return cnt;
}

void nf_conntrack_cleanup_start(void)
{
        RCU_INIT_POINTER(ip_ct_attach, NULL);
}

void nf_conntrack_cleanup_end(void)
{
        RCU_INIT_POINTER(nf_ct_destroy, NULL);
        while (untrack_refs() > 0)
                schedule();

#ifdef CONFIG_NF_CONNTRACK_ZONES
        nf_ct_extend_unregister(&nf_ct_zone_extend);
#endif
        nf_conntrack_proto_fini();
        nf_conntrack_seqadj_fini();
        nf_conntrack_labels_fini();
        nf_conntrack_helper_fini();
        nf_conntrack_timeout_fini();
        nf_conntrack_ecache_fini();
        nf_conntrack_tstamp_fini();
        nf_conntrack_acct_fini();
        nf_conntrack_expect_fini();
}

/*
 * Mishearing the voices in his head, our hero wonders how he's
 * supposed to kill the mall.
 */
void nf_conntrack_cleanup_net(struct net *net)
{
        LIST_HEAD(single);

        list_add(&net->exit_list, &single);
        nf_conntrack_cleanup_net_list(&single);
}

void nf_conntrack_cleanup_net_list(struct list_head *net_exit_list)
{
        int busy;
        struct net *net;

1502         /*
1503          * This makes sure all current packets have passed through
1504          * the netfilter framework.  Roll on, two-stage module
1505          * delete...
1506          */
1507         synchronize_net();
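	/* Conntracks referenced by in-flight skbs keep net->ct.count
	 * elevated, so keep flushing and rescheduling until every netns
	 * on the list has drained to zero.
	 */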
1508 i_see_dead_people:
1509         busy = 0;
1510         list_for_each_entry(net, net_exit_list, exit_list) {
1511                 nf_ct_iterate_cleanup(net, kill_all, NULL, 0, 0);
1512                 if (atomic_read(&net->ct.count) != 0)
1513                         busy = 1;
1514         }
1515         if (busy) {
1516                 schedule();
1517                 goto i_see_dead_people;
1518         }
1519
1520         list_for_each_entry(net, net_exit_list, exit_list) {
1521                 nf_ct_free_hashtable(net->ct.hash, net->ct.htable_size);
1522                 nf_conntrack_proto_pernet_fini(net);
1523                 nf_conntrack_helper_pernet_fini(net);
1524                 nf_conntrack_ecache_pernet_fini(net);
1525                 nf_conntrack_tstamp_pernet_fini(net);
1526                 nf_conntrack_acct_pernet_fini(net);
1527                 nf_conntrack_expect_pernet_fini(net);
1528                 kmem_cache_destroy(net->ct.nf_conntrack_cachep);
1529                 kfree(net->ct.slabname);
1530                 free_percpu(net->ct.stat);
1531                 free_percpu(net->ct.pcpu_lists);
1532         }
1533 }
1534
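/* Allocate a hash table rounded up to a whole number of pages of
 * buckets, preferring the page allocator and falling back to vzalloc().
 * With @nulls set, each chain is terminated by a nulls marker carrying
 * its bucket index, which lets lockless RCU lookups detect that an
 * entry was moved to another chain mid-walk.
 */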
1535 void *nf_ct_alloc_hashtable(unsigned int *sizep, int nulls)
1536 {
1537         struct hlist_nulls_head *hash;
1538         unsigned int nr_slots, i;
1539         size_t sz;
1540
1541         BUILD_BUG_ON(sizeof(struct hlist_nulls_head) != sizeof(struct hlist_head));
1542         nr_slots = *sizep = roundup(*sizep, PAGE_SIZE / sizeof(struct hlist_nulls_head));
1543         sz = nr_slots * sizeof(struct hlist_nulls_head);
1544         hash = (void *)__get_free_pages(GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO,
1545                                         get_order(sz));
1546         if (!hash)
1547                 hash = vzalloc(sz);
1548
1549         if (hash && nulls)
1550                 for (i = 0; i < nr_slots; i++)
1551                         INIT_HLIST_NULLS_HEAD(&hash[i], i);
1552
1553         return hash;
1554 }
1555 EXPORT_SYMBOL_GPL(nf_ct_alloc_hashtable);
1556
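/* Runtime resize of the conntrack hash (init netns only).  All buckets
 * are rehashed into the new table under nf_conntrack_all_lock(), with
 * the generation seqcount bumped so lockless readers notice the swap
 * and retry.
 */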
1557 int nf_conntrack_set_hashsize(const char *val, struct kernel_param *kp)
1558 {
1559         int i, bucket, rc;
1560         unsigned int hashsize, old_size;
1561         struct hlist_nulls_head *hash, *old_hash;
1562         struct nf_conntrack_tuple_hash *h;
1563         struct nf_conn *ct;
1564
1565         if (current->nsproxy->net_ns != &init_net)
1566                 return -EOPNOTSUPP;
1567
1568         /* On boot, we can set this without any fancy locking. */
1569         if (!nf_conntrack_htable_size)
1570                 return param_set_uint(val, kp);
1571
1572         rc = kstrtouint(val, 0, &hashsize);
1573         if (rc)
1574                 return rc;
1575         if (!hashsize)
1576                 return -EINVAL;
1577
1578         hash = nf_ct_alloc_hashtable(&hashsize, 1);
1579         if (!hash)
1580                 return -ENOMEM;
1581
1582         local_bh_disable();
1583         nf_conntrack_all_lock();
1584         write_seqcount_begin(&init_net.ct.generation);
1585
1586         /* Lookups in the old hash might happen in parallel, which means we
1587          * might get false negatives during connection lookup. New connections
1588          * created because of a false negative won't make it into the hash
1589          * though, since that requires taking the locks.
1590          */
1591
1592         for (i = 0; i < init_net.ct.htable_size; i++) {
1593                 while (!hlist_nulls_empty(&init_net.ct.hash[i])) {
1594                         h = hlist_nulls_entry(init_net.ct.hash[i].first,
1595                                         struct nf_conntrack_tuple_hash, hnnode);
1596                         ct = nf_ct_tuplehash_to_ctrack(h);
1597                         hlist_nulls_del_rcu(&h->hnnode);
1598                         bucket = __hash_conntrack(&h->tuple, hashsize);
1599                         hlist_nulls_add_head_rcu(&h->hnnode, &hash[bucket]);
1600                 }
1601         }
1602         old_size = init_net.ct.htable_size;
1603         old_hash = init_net.ct.hash;
1604
1605         init_net.ct.htable_size = nf_conntrack_htable_size = hashsize;
1606         init_net.ct.hash = hash;
1607
1608         write_seqcount_end(&init_net.ct.generation);
1609         nf_conntrack_all_unlock();
1610         local_bh_enable();
1611
1612         nf_ct_free_hashtable(old_hash, old_size);
1613         return 0;
1614 }
1615 EXPORT_SYMBOL_GPL(nf_conntrack_set_hashsize);
1616
1617 module_param_call(hashsize, nf_conntrack_set_hashsize, param_get_uint,
1618                   &nf_conntrack_htable_size, 0600);
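/* The table can thus be resized at runtime from the init netns, e.g.:
 *
 *	echo 65536 > /sys/module/nf_conntrack/parameters/hashsize
 */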
1619
1620 void nf_ct_untracked_status_or(unsigned long bits)
1621 {
1622         int cpu;
1623
1624         for_each_possible_cpu(cpu)
1625                 per_cpu(nf_conntrack_untracked, cpu).status |= bits;
1626 }
1627 EXPORT_SYMBOL_GPL(nf_ct_untracked_status_or);
1628
1629 int nf_conntrack_init_start(void)
1630 {
1631         int max_factor = 8;
1632         int i, ret, cpu;
1633
1634         for (i = 0; i < CONNTRACK_LOCKS; i++)
1635                 spin_lock_init(&nf_conntrack_locks[i]);
1636
1637         if (!nf_conntrack_htable_size) {
1638                 /* Idea from tcp.c: use 1/16384 of memory.
1639                  * On i386: 32MB machine has 512 buckets.
1640                  * >= 1GB machines have 16384 buckets.
1641                  * >= 4GB machines have 65536 buckets.
1642                  */
1643                 nf_conntrack_htable_size
1644                         = (((totalram_pages << PAGE_SHIFT) / 16384)
1645                            / sizeof(struct hlist_head));
1646                 if (totalram_pages > (4 * (1024 * 1024 * 1024 / PAGE_SIZE)))
1647                         nf_conntrack_htable_size = 65536;
1648                 else if (totalram_pages > (1024 * 1024 * 1024 / PAGE_SIZE))
1649                         nf_conntrack_htable_size = 16384;
1650                 if (nf_conntrack_htable_size < 32)
1651                         nf_conntrack_htable_size = 32;
1652
1653                 /* Use a maximum factor of four by default to get the same
1654                  * maximum as with the old struct list_heads. When a table
1655                  * size is given, we use the old value of 8 to avoid reducing
1656                  * the maximum number of entries. */
1657                 max_factor = 4;
1658         }
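	/* E.g. the >= 1GB default of 16384 buckets with max_factor 4
	 * yields nf_conntrack_max = 65536 tracked connections. */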
1659         nf_conntrack_max = max_factor * nf_conntrack_htable_size;
1660
1661         printk(KERN_INFO "nf_conntrack version %s (%u buckets, %d max)\n",
1662                NF_CONNTRACK_VERSION, nf_conntrack_htable_size,
1663                nf_conntrack_max);
1664
1665         ret = nf_conntrack_expect_init();
1666         if (ret < 0)
1667                 goto err_expect;
1668
1669         ret = nf_conntrack_acct_init();
1670         if (ret < 0)
1671                 goto err_acct;
1672
1673         ret = nf_conntrack_tstamp_init();
1674         if (ret < 0)
1675                 goto err_tstamp;
1676
1677         ret = nf_conntrack_ecache_init();
1678         if (ret < 0)
1679                 goto err_ecache;
1680
1681         ret = nf_conntrack_timeout_init();
1682         if (ret < 0)
1683                 goto err_timeout;
1684
1685         ret = nf_conntrack_helper_init();
1686         if (ret < 0)
1687                 goto err_helper;
1688
1689         ret = nf_conntrack_labels_init();
1690         if (ret < 0)
1691                 goto err_labels;
1692
1693         ret = nf_conntrack_seqadj_init();
1694         if (ret < 0)
1695                 goto err_seqadj;
1696
1697 #ifdef CONFIG_NF_CONNTRACK_ZONES
1698         ret = nf_ct_extend_register(&nf_ct_zone_extend);
1699         if (ret < 0)
1700                 goto err_extend;
1701 #endif
1702         ret = nf_conntrack_proto_init();
1703         if (ret < 0)
1704                 goto err_proto;
1705
1706         /* Set up fake conntrack: to never be deleted, not in any hashes */
1707         for_each_possible_cpu(cpu) {
1708                 struct nf_conn *ct = &per_cpu(nf_conntrack_untracked, cpu);
1709                 write_pnet(&ct->ct_net, &init_net);
1710                 atomic_set(&ct->ct_general.use, 1);
1711         }
1712         /* - and make it look like a confirmed connection */
1713         nf_ct_untracked_status_or(IPS_CONFIRMED | IPS_UNTRACKED);
1714         return 0;
1715
1716 err_proto:
1717 #ifdef CONFIG_NF_CONNTRACK_ZONES
1718         nf_ct_extend_unregister(&nf_ct_zone_extend);
1719 err_extend:
1720 #endif
1721         nf_conntrack_seqadj_fini();
1722 err_seqadj:
1723         nf_conntrack_labels_fini();
1724 err_labels:
1725         nf_conntrack_helper_fini();
1726 err_helper:
1727         nf_conntrack_timeout_fini();
1728 err_timeout:
1729         nf_conntrack_ecache_fini();
1730 err_ecache:
1731         nf_conntrack_tstamp_fini();
1732 err_tstamp:
1733         nf_conntrack_acct_fini();
1734 err_acct:
1735         nf_conntrack_expect_fini();
1736 err_expect:
1737         return ret;
1738 }
1739
1740 void nf_conntrack_init_end(void)
1741 {
1742         /* For use by REJECT target */
1743         RCU_INIT_POINTER(ip_ct_attach, nf_conntrack_attach);
1744         RCU_INIT_POINTER(nf_ct_destroy, destroy_conntrack);
1745 }
1746
1747 /*
1748  * We need special "null" values that are not used in the hash table.
1749  */
1750 #define UNCONFIRMED_NULLS_VAL   ((1<<30)+0)
1751 #define DYING_NULLS_VAL         ((1<<30)+1)
1752 #define TEMPLATE_NULLS_VAL      ((1<<30)+2)
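/* A nulls list terminates in the value it was initialized with instead
 * of NULL, so an RCU reader that finishes a walk can check which list
 * it ended up on; landing on an unexpected value means the entry was
 * recycled onto another chain and the lookup must be restarted.
 */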
1753
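/* Per-netns state: counters, per-cpu unconfirmed/dying lists, stats,
 * a netns-local nf_conn slab and the hash table itself.  The error
 * labels below unwind in exact reverse order of setup.
 */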
1754 int nf_conntrack_init_net(struct net *net)
1755 {
1756         int ret = -ENOMEM;
1757         int cpu;
1758
1759         atomic_set(&net->ct.count, 0);
1760         seqcount_init(&net->ct.generation);
1761
1762         net->ct.pcpu_lists = alloc_percpu(struct ct_pcpu);
1763         if (!net->ct.pcpu_lists)
1764                 goto err_stat;
1765
1766         for_each_possible_cpu(cpu) {
1767                 struct ct_pcpu *pcpu = per_cpu_ptr(net->ct.pcpu_lists, cpu);
1768
1769                 spin_lock_init(&pcpu->lock);
1770                 INIT_HLIST_NULLS_HEAD(&pcpu->unconfirmed, UNCONFIRMED_NULLS_VAL);
1771                 INIT_HLIST_NULLS_HEAD(&pcpu->dying, DYING_NULLS_VAL);
1772         }
1773
1774         net->ct.stat = alloc_percpu(struct ip_conntrack_stat);
1775         if (!net->ct.stat)
1776                 goto err_pcpu_lists;
1777
1778         net->ct.slabname = kasprintf(GFP_KERNEL, "nf_conntrack_%p", net);
1779         if (!net->ct.slabname)
1780                 goto err_slabname;
1781
1782         net->ct.nf_conntrack_cachep = kmem_cache_create(net->ct.slabname,
1783                                                         sizeof(struct nf_conn), 0,
1784                                                         SLAB_DESTROY_BY_RCU, NULL);
1785         if (!net->ct.nf_conntrack_cachep) {
1786                 printk(KERN_ERR "Unable to create nf_conn slab cache\n");
1787                 goto err_cache;
1788         }
1789
1790         net->ct.htable_size = nf_conntrack_htable_size;
1791         net->ct.hash = nf_ct_alloc_hashtable(&net->ct.htable_size, 1);
1792         if (!net->ct.hash) {
1793                 printk(KERN_ERR "Unable to create nf_conntrack_hash\n");
1794                 goto err_hash;
1795         }
1796         ret = nf_conntrack_expect_pernet_init(net);
1797         if (ret < 0)
1798                 goto err_expect;
1799         ret = nf_conntrack_acct_pernet_init(net);
1800         if (ret < 0)
1801                 goto err_acct;
1802         ret = nf_conntrack_tstamp_pernet_init(net);
1803         if (ret < 0)
1804                 goto err_tstamp;
1805         ret = nf_conntrack_ecache_pernet_init(net);
1806         if (ret < 0)
1807                 goto err_ecache;
1808         ret = nf_conntrack_helper_pernet_init(net);
1809         if (ret < 0)
1810                 goto err_helper;
1811         ret = nf_conntrack_proto_pernet_init(net);
1812         if (ret < 0)
1813                 goto err_proto;
1814         return 0;
1815
1816 err_proto:
1817         nf_conntrack_helper_pernet_fini(net);
1818 err_helper:
1819         nf_conntrack_ecache_pernet_fini(net);
1820 err_ecache:
1821         nf_conntrack_tstamp_pernet_fini(net);
1822 err_tstamp:
1823         nf_conntrack_acct_pernet_fini(net);
1824 err_acct:
1825         nf_conntrack_expect_pernet_fini(net);
1826 err_expect:
1827         nf_ct_free_hashtable(net->ct.hash, net->ct.htable_size);
1828 err_hash:
1829         kmem_cache_destroy(net->ct.nf_conntrack_cachep);
1830 err_cache:
1831         kfree(net->ct.slabname);
1832 err_slabname:
1833         free_percpu(net->ct.stat);
1834 err_pcpu_lists:
1835         free_percpu(net->ct.pcpu_lists);
1836 err_stat:
1837         return ret;
1838 }