/* Connection state tracking for netfilter.  This is separated from,
   but required by, the NAT layer; it can also be used by an iptables
   connection tracking module.
*/

/* (C) 1999-2001 Paul `Rusty' Russell
 * (C) 2002-2006 Netfilter Core Team <coreteam@netfilter.org>
 * (C) 2003,2004 USAGI/WIDE Project <http://www.linux-ipv6.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/types.h>
#include <linux/netfilter.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/skbuff.h>
#include <linux/proc_fs.h>
#include <linux/vmalloc.h>
#include <linux/stddef.h>
#include <linux/slab.h>
#include <linux/random.h>
#include <linux/jhash.h>
#include <linux/err.h>
#include <linux/percpu.h>
#include <linux/moduleparam.h>
#include <linux/notifier.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/socket.h>
#include <linux/nsproxy.h>
#include <linux/rculist_nulls.h>

#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_l3proto.h>
#include <net/netfilter/nf_conntrack_l4proto.h>
#include <net/netfilter/nf_conntrack_expect.h>
#include <net/netfilter/nf_conntrack_helper.h>
#include <net/netfilter/nf_conntrack_core.h>
#include <net/netfilter/nf_conntrack_extend.h>
#include <net/netfilter/nf_conntrack_acct.h>
#include <net/netfilter/nf_conntrack_ecache.h>
#include <net/netfilter/nf_conntrack_zones.h>
#include <net/netfilter/nf_conntrack_timestamp.h>
#include <net/netfilter/nf_conntrack_timeout.h>
#include <net/netfilter/nf_nat.h>
#include <net/netfilter/nf_nat_core.h>

#define NF_CONNTRACK_VERSION	"0.5.0"
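
/* The hooks below are filled in at runtime by the NAT code when it is
 * loaded; they stay NULL otherwise.
 */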
int (*nfnetlink_parse_nat_setup_hook)(struct nf_conn *ct,
				      enum nf_nat_manip_type manip,
				      const struct nlattr *attr) __read_mostly;
EXPORT_SYMBOL_GPL(nfnetlink_parse_nat_setup_hook);

int (*nf_nat_seq_adjust_hook)(struct sk_buff *skb,
			      struct nf_conn *ct,
			      enum ip_conntrack_info ctinfo,
			      unsigned int protoff);
EXPORT_SYMBOL_GPL(nf_nat_seq_adjust_hook);

DEFINE_SPINLOCK(nf_conntrack_lock);
EXPORT_SYMBOL_GPL(nf_conntrack_lock);

unsigned int nf_conntrack_htable_size __read_mostly;
EXPORT_SYMBOL_GPL(nf_conntrack_htable_size);

unsigned int nf_conntrack_max __read_mostly;
EXPORT_SYMBOL_GPL(nf_conntrack_max);

DEFINE_PER_CPU(struct nf_conn, nf_conntrack_untracked);
EXPORT_PER_CPU_SYMBOL(nf_conntrack_untracked);

unsigned int nf_conntrack_hash_rnd __read_mostly;
EXPORT_SYMBOL_GPL(nf_conntrack_hash_rnd);

static u32 hash_conntrack_raw(const struct nf_conntrack_tuple *tuple, u16 zone)
{
	unsigned int n;

	/* The direction must be ignored, so we hash everything up to the
	 * destination ports (which is a multiple of 4) and treat the last
	 * three bytes manually.
	 */
	n = (sizeof(tuple->src) + sizeof(tuple->dst.u3)) / sizeof(u32);
	return jhash2((u32 *)tuple, n, zone ^ nf_conntrack_hash_rnd ^
		      (((__force __u16)tuple->dst.u.all << 16) |
		      tuple->dst.protonum));
}
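
/* Map a 32-bit hash value onto [0, size) with a multiply-and-shift instead
 * of a modulo, which avoids a division on the packet path.
 */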
static u32 __hash_bucket(u32 hash, unsigned int size)
{
	return ((u64)hash * size) >> 32;
}

static u32 hash_bucket(u32 hash, const struct net *net)
{
	return __hash_bucket(hash, net->ct.htable_size);
}

static u_int32_t __hash_conntrack(const struct nf_conntrack_tuple *tuple,
				  u16 zone, unsigned int size)
{
	return __hash_bucket(hash_conntrack_raw(tuple, zone), size);
}

static inline u_int32_t hash_conntrack(const struct net *net, u16 zone,
				       const struct nf_conntrack_tuple *tuple)
{
	return __hash_conntrack(tuple, zone, net->ct.htable_size);
}

bool
nf_ct_get_tuple(const struct sk_buff *skb,
		unsigned int nhoff,
		unsigned int dataoff,
		u_int16_t l3num,
		u_int8_t protonum,
		struct nf_conntrack_tuple *tuple,
		const struct nf_conntrack_l3proto *l3proto,
		const struct nf_conntrack_l4proto *l4proto)
{
	memset(tuple, 0, sizeof(*tuple));

	tuple->src.l3num = l3num;
	if (l3proto->pkt_to_tuple(skb, nhoff, tuple) == 0)
		return false;

	tuple->dst.protonum = protonum;
	tuple->dst.dir = IP_CT_DIR_ORIGINAL;

	return l4proto->pkt_to_tuple(skb, dataoff, tuple);
}
EXPORT_SYMBOL_GPL(nf_ct_get_tuple);

bool nf_ct_get_tuplepr(const struct sk_buff *skb, unsigned int nhoff,
		       u_int16_t l3num, struct nf_conntrack_tuple *tuple)
{
	struct nf_conntrack_l3proto *l3proto;
	struct nf_conntrack_l4proto *l4proto;
	unsigned int protoff;
	u_int8_t protonum;
	int ret;

	rcu_read_lock();

	l3proto = __nf_ct_l3proto_find(l3num);
	ret = l3proto->get_l4proto(skb, nhoff, &protoff, &protonum);
	if (ret != NF_ACCEPT) {
		rcu_read_unlock();
		return false;
	}

	l4proto = __nf_ct_l4proto_find(l3num, protonum);

	ret = nf_ct_get_tuple(skb, nhoff, protoff, l3num, protonum, tuple,
			      l3proto, l4proto);

	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(nf_ct_get_tuplepr);

bool
nf_ct_invert_tuple(struct nf_conntrack_tuple *inverse,
		   const struct nf_conntrack_tuple *orig,
		   const struct nf_conntrack_l3proto *l3proto,
		   const struct nf_conntrack_l4proto *l4proto)
{
	memset(inverse, 0, sizeof(*inverse));

	inverse->src.l3num = orig->src.l3num;
	if (l3proto->invert_tuple(inverse, orig) == 0)
		return false;

	inverse->dst.dir = !orig->dst.dir;

	inverse->dst.protonum = orig->dst.protonum;
	return l4proto->invert_tuple(inverse, orig);
}
EXPORT_SYMBOL_GPL(nf_ct_invert_tuple);
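
/* Unlink a conntrack from both hash chains and drop any expectations it
 * still owns.  Runs with nf_conntrack_lock held.
 */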
static void
clean_from_lists(struct nf_conn *ct)
{
	pr_debug("clean_from_lists(%p)\n", ct);
	hlist_nulls_del_rcu(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode);
	hlist_nulls_del_rcu(&ct->tuplehash[IP_CT_DIR_REPLY].hnnode);

	/* Destroy all pending expectations */
	nf_ct_remove_expectations(ct);
}

static void
destroy_conntrack(struct nf_conntrack *nfct)
{
	struct nf_conn *ct = (struct nf_conn *)nfct;
	struct net *net = nf_ct_net(ct);
	struct nf_conntrack_l4proto *l4proto;

	pr_debug("destroy_conntrack(%p)\n", ct);
	NF_CT_ASSERT(atomic_read(&nfct->use) == 0);
	NF_CT_ASSERT(!timer_pending(&ct->timeout));

	/* To make sure we don't get any weird locking issues here:
	 * destroy_conntrack() MUST NOT be called with a write lock
	 * to nf_conntrack_lock!!! -HW */
	rcu_read_lock();
	l4proto = __nf_ct_l4proto_find(nf_ct_l3num(ct), nf_ct_protonum(ct));
	if (l4proto && l4proto->destroy)
		l4proto->destroy(ct);

	rcu_read_unlock();

	spin_lock_bh(&nf_conntrack_lock);
	/* Expectations will have been removed in clean_from_lists,
	 * except TFTP can create an expectation on the first packet,
	 * before connection is in the list, so we need to clean here,
	 * too. */
	nf_ct_remove_expectations(ct);

	/* We overload first tuple to link into unconfirmed list. */
	if (!nf_ct_is_confirmed(ct)) {
		BUG_ON(hlist_nulls_unhashed(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode));
		hlist_nulls_del_rcu(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode);
	}

	NF_CT_STAT_INC(net, delete);
	spin_unlock_bh(&nf_conntrack_lock);

	if (ct->master)
		nf_ct_put(ct->master);

	pr_debug("destroy_conntrack: returning ct=%p to slab\n", ct);
	nf_conntrack_free(ct);
}

void nf_ct_delete_from_lists(struct nf_conn *ct)
{
	struct net *net = nf_ct_net(ct);

	nf_ct_helper_destroy(ct);
	spin_lock_bh(&nf_conntrack_lock);
	/* Inside lock so preempt is disabled on module removal path.
	 * Otherwise we can get spurious warnings. */
	NF_CT_STAT_INC(net, delete_list);
	clean_from_lists(ct);
	spin_unlock_bh(&nf_conntrack_lock);
}
EXPORT_SYMBOL_GPL(nf_ct_delete_from_lists);
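
/* Timer callback used while an entry sits on the dying list: retry delivery
 * of the destroy event and, once it succeeds, take the entry off that list.
 */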
static void death_by_event(unsigned long ul_conntrack)
{
	struct nf_conn *ct = (void *)ul_conntrack;
	struct net *net = nf_ct_net(ct);

	if (nf_conntrack_event(IPCT_DESTROY, ct) < 0) {
		/* bad luck, let's retry again */
		ct->timeout.expires = jiffies +
			(random32() % net->ct.sysctl_events_retry_timeout);
		add_timer(&ct->timeout);
		return;
	}
	/* we've got the event delivered, now it's dying */
	set_bit(IPS_DYING_BIT, &ct->status);
	spin_lock(&nf_conntrack_lock);
	hlist_nulls_del(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode);
	spin_unlock(&nf_conntrack_lock);
	nf_ct_put(ct);
}

void nf_ct_insert_dying_list(struct nf_conn *ct)
{
	struct net *net = nf_ct_net(ct);

	/* add this conntrack to the dying list */
	spin_lock_bh(&nf_conntrack_lock);
	hlist_nulls_add_head(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode,
			     &net->ct.dying);
	spin_unlock_bh(&nf_conntrack_lock);
	/* set a new timer to retry event delivery */
	setup_timer(&ct->timeout, death_by_event, (unsigned long)ct);
	ct->timeout.expires = jiffies +
		(random32() % net->ct.sysctl_events_retry_timeout);
	add_timer(&ct->timeout);
}
EXPORT_SYMBOL_GPL(nf_ct_insert_dying_list);

static void death_by_timeout(unsigned long ul_conntrack)
{
	struct nf_conn *ct = (void *)ul_conntrack;
	struct nf_conn_tstamp *tstamp;

	tstamp = nf_conn_tstamp_find(ct);
	if (tstamp && tstamp->stop == 0)
		tstamp->stop = ktime_to_ns(ktime_get_real());

	if (!test_bit(IPS_DYING_BIT, &ct->status) &&
	    unlikely(nf_conntrack_event(IPCT_DESTROY, ct) < 0)) {
		/* destroy event was not delivered */
		nf_ct_delete_from_lists(ct);
		nf_ct_insert_dying_list(ct);
		return;
	}
	set_bit(IPS_DYING_BIT, &ct->status);
	nf_ct_delete_from_lists(ct);
	nf_ct_put(ct);
}

/*
 * Warning :
 * - Caller must take a reference on returned object
 *   and recheck nf_ct_tuple_equal(tuple, &h->tuple)
 * OR
 * - Caller must lock nf_conntrack_lock before calling this function
 */
static struct nf_conntrack_tuple_hash *
____nf_conntrack_find(struct net *net, u16 zone,
		      const struct nf_conntrack_tuple *tuple, u32 hash)
{
	struct nf_conntrack_tuple_hash *h;
	struct hlist_nulls_node *n;
	unsigned int bucket = hash_bucket(hash, net);

	/* Disable BHs the entire time since we normally need to disable them
	 * at least once for the stats anyway.
	 */
	local_bh_disable();
begin:
	hlist_nulls_for_each_entry_rcu(h, n, &net->ct.hash[bucket], hnnode) {
		if (nf_ct_tuple_equal(tuple, &h->tuple) &&
		    nf_ct_zone(nf_ct_tuplehash_to_ctrack(h)) == zone) {
			NF_CT_STAT_INC(net, found);
			local_bh_enable();
			return h;
		}
		NF_CT_STAT_INC(net, searched);
	}
	/*
	 * if the nulls value we got at the end of this lookup is
	 * not the expected one, we must restart lookup.
	 * We probably met an item that was moved to another chain.
	 */
	if (get_nulls_value(n) != bucket) {
		NF_CT_STAT_INC(net, search_restart);
		goto begin;
	}
	local_bh_enable();

	return NULL;
}

struct nf_conntrack_tuple_hash *
__nf_conntrack_find(struct net *net, u16 zone,
		    const struct nf_conntrack_tuple *tuple)
{
	return ____nf_conntrack_find(net, zone, tuple,
				     hash_conntrack_raw(tuple, zone));
}
EXPORT_SYMBOL_GPL(__nf_conntrack_find);

/* Find a connection corresponding to a tuple. */
static struct nf_conntrack_tuple_hash *
__nf_conntrack_find_get(struct net *net, u16 zone,
			const struct nf_conntrack_tuple *tuple, u32 hash)
{
	struct nf_conntrack_tuple_hash *h;
	struct nf_conn *ct;

	rcu_read_lock();
begin:
	h = ____nf_conntrack_find(net, zone, tuple, hash);
	if (h) {
		ct = nf_ct_tuplehash_to_ctrack(h);
		if (unlikely(nf_ct_is_dying(ct) ||
			     !atomic_inc_not_zero(&ct->ct_general.use)))
			h = NULL;
		else {
			if (unlikely(!nf_ct_tuple_equal(tuple, &h->tuple) ||
				     nf_ct_zone(ct) != zone)) {
				nf_ct_put(ct);
				goto begin;
			}
		}
	}
	rcu_read_unlock();
	return h;
}

struct nf_conntrack_tuple_hash *
nf_conntrack_find_get(struct net *net, u16 zone,
		      const struct nf_conntrack_tuple *tuple)
{
	return __nf_conntrack_find_get(net, zone, tuple,
				       hash_conntrack_raw(tuple, zone));
}
EXPORT_SYMBOL_GPL(nf_conntrack_find_get);
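
/* Link both tuple directions of a conntrack into the hash table; the chains
 * are RCU-protected nulls lists, so readers never take nf_conntrack_lock.
 */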
static void __nf_conntrack_hash_insert(struct nf_conn *ct,
				       unsigned int hash,
				       unsigned int repl_hash)
{
	struct net *net = nf_ct_net(ct);

	hlist_nulls_add_head_rcu(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode,
				 &net->ct.hash[hash]);
	hlist_nulls_add_head_rcu(&ct->tuplehash[IP_CT_DIR_REPLY].hnnode,
				 &net->ct.hash[repl_hash]);
}

int
nf_conntrack_hash_check_insert(struct nf_conn *ct)
{
	struct net *net = nf_ct_net(ct);
	unsigned int hash, repl_hash;
	struct nf_conntrack_tuple_hash *h;
	struct hlist_nulls_node *n;
	u16 zone;

	zone = nf_ct_zone(ct);
	hash = hash_conntrack(net, zone,
			      &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
	repl_hash = hash_conntrack(net, zone,
				   &ct->tuplehash[IP_CT_DIR_REPLY].tuple);

	spin_lock_bh(&nf_conntrack_lock);

	/* See if there's one in the list already, including reverse */
	hlist_nulls_for_each_entry(h, n, &net->ct.hash[hash], hnnode)
		if (nf_ct_tuple_equal(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple,
				      &h->tuple) &&
		    zone == nf_ct_zone(nf_ct_tuplehash_to_ctrack(h)))
			goto out;
	hlist_nulls_for_each_entry(h, n, &net->ct.hash[repl_hash], hnnode)
		if (nf_ct_tuple_equal(&ct->tuplehash[IP_CT_DIR_REPLY].tuple,
				      &h->tuple) &&
		    zone == nf_ct_zone(nf_ct_tuplehash_to_ctrack(h)))
			goto out;

	add_timer(&ct->timeout);
	nf_conntrack_get(&ct->ct_general);
	__nf_conntrack_hash_insert(ct, hash, repl_hash);
	NF_CT_STAT_INC(net, insert);
	spin_unlock_bh(&nf_conntrack_lock);

	return 0;

out:
	NF_CT_STAT_INC(net, insert_failed);
	spin_unlock_bh(&nf_conntrack_lock);
	return -EEXIST;
}
EXPORT_SYMBOL_GPL(nf_conntrack_hash_check_insert);

/* Confirm a connection given skb; places it in hash table */
int
__nf_conntrack_confirm(struct sk_buff *skb)
{
	unsigned int hash, repl_hash;
	struct nf_conntrack_tuple_hash *h;
	struct nf_conn *ct;
	struct nf_conn_help *help;
	struct nf_conn_tstamp *tstamp;
	struct hlist_nulls_node *n;
	enum ip_conntrack_info ctinfo;
	struct net *net;
	u16 zone;

	ct = nf_ct_get(skb, &ctinfo);
	net = nf_ct_net(ct);

	/* ipt_REJECT uses nf_conntrack_attach to attach related
	   ICMP/TCP RST packets in other direction.  Actual packet
	   which created connection will be IP_CT_NEW or for an
	   expected connection, IP_CT_RELATED. */
	if (CTINFO2DIR(ctinfo) != IP_CT_DIR_ORIGINAL)
		return NF_ACCEPT;

	zone = nf_ct_zone(ct);
	/* reuse the hash saved before */
	hash = *(unsigned long *)&ct->tuplehash[IP_CT_DIR_REPLY].hnnode.pprev;
	hash = hash_bucket(hash, net);
	repl_hash = hash_conntrack(net, zone,
				   &ct->tuplehash[IP_CT_DIR_REPLY].tuple);

	/* We're not in hash table, and we refuse to set up related
	   connections for unconfirmed conns.  But packet copies and
	   REJECT will give spurious warnings here. */
	/* NF_CT_ASSERT(atomic_read(&ct->ct_general.use) == 1); */

	/* No external references means no one else could have
	   confirmed us. */
	NF_CT_ASSERT(!nf_ct_is_confirmed(ct));
	pr_debug("Confirming conntrack %p\n", ct);

	spin_lock_bh(&nf_conntrack_lock);

	/* We have to check the DYING flag inside the lock to prevent
	   a race against nf_ct_get_next_corpse() possibly called from
	   user context, else we insert an already 'dead' hash, blocking
	   further use of that particular connection -JM */

	if (unlikely(nf_ct_is_dying(ct))) {
		spin_unlock_bh(&nf_conntrack_lock);
		return NF_ACCEPT;
	}

	/* See if there's one in the list already, including reverse:
	   NAT could have grabbed it without realizing, since we're
	   not in the hash.  If there is, we lost race. */
	hlist_nulls_for_each_entry(h, n, &net->ct.hash[hash], hnnode)
		if (nf_ct_tuple_equal(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple,
				      &h->tuple) &&
		    zone == nf_ct_zone(nf_ct_tuplehash_to_ctrack(h)))
			goto out;
	hlist_nulls_for_each_entry(h, n, &net->ct.hash[repl_hash], hnnode)
		if (nf_ct_tuple_equal(&ct->tuplehash[IP_CT_DIR_REPLY].tuple,
				      &h->tuple) &&
		    zone == nf_ct_zone(nf_ct_tuplehash_to_ctrack(h)))
			goto out;

	/* Remove from unconfirmed list */
	hlist_nulls_del_rcu(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode);

	/* Timer relative to confirmation time, not original
	   setting time, otherwise we'd get timer wrap in
	   weird delay cases. */
	ct->timeout.expires += jiffies;
	add_timer(&ct->timeout);
	atomic_inc(&ct->ct_general.use);
	ct->status |= IPS_CONFIRMED;

	/* set conntrack timestamp, if enabled. */
	tstamp = nf_conn_tstamp_find(ct);
	if (tstamp) {
		if (skb->tstamp.tv64 == 0)
			__net_timestamp(skb);

		tstamp->start = ktime_to_ns(skb->tstamp);
	}
	/* Since the lookup is lockless, hash insertion must be done after
	 * starting the timer and setting the CONFIRMED bit. The RCU barriers
	 * guarantee that no other CPU can find the conntrack before the above
	 * stores are visible.
	 */
	__nf_conntrack_hash_insert(ct, hash, repl_hash);
	NF_CT_STAT_INC(net, insert);
	spin_unlock_bh(&nf_conntrack_lock);

	help = nfct_help(ct);
	if (help && help->helper)
		nf_conntrack_event_cache(IPCT_HELPER, ct);

	nf_conntrack_event_cache(master_ct(ct) ?
				 IPCT_RELATED : IPCT_NEW, ct);
	return NF_ACCEPT;

out:
	NF_CT_STAT_INC(net, insert_failed);
	spin_unlock_bh(&nf_conntrack_lock);
	return NF_DROP;
}
EXPORT_SYMBOL_GPL(__nf_conntrack_confirm);

/* Returns true if a connection corresponds to the tuple (required
   for NAT). */
int
nf_conntrack_tuple_taken(const struct nf_conntrack_tuple *tuple,
			 const struct nf_conn *ignored_conntrack)
{
	struct net *net = nf_ct_net(ignored_conntrack);
	struct nf_conntrack_tuple_hash *h;
	struct hlist_nulls_node *n;
	struct nf_conn *ct;
	u16 zone = nf_ct_zone(ignored_conntrack);
	unsigned int hash = hash_conntrack(net, zone, tuple);

	/* Disable BHs the entire time since we need to disable them at
	 * least once for the stats anyway.
	 */
	rcu_read_lock_bh();
	hlist_nulls_for_each_entry_rcu(h, n, &net->ct.hash[hash], hnnode) {
		ct = nf_ct_tuplehash_to_ctrack(h);
		if (ct != ignored_conntrack &&
		    nf_ct_tuple_equal(tuple, &h->tuple) &&
		    nf_ct_zone(ct) == zone) {
			NF_CT_STAT_INC(net, found);
			rcu_read_unlock_bh();
			return 1;
		}
		NF_CT_STAT_INC(net, searched);
	}
	rcu_read_unlock_bh();

	return 0;
}
EXPORT_SYMBOL_GPL(nf_conntrack_tuple_taken);

#define NF_CT_EVICTION_RANGE	8
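
/* Upper bound on how many hash-chain entries early_drop() scans while
 * looking for an evictable (non-assured) entry before it gives up.
 */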
/* There's a small race here where we may free a just-assured
   connection.  Too bad: we're in trouble anyway. */
static noinline int early_drop(struct net *net, unsigned int hash)
{
	/* Use oldest entry, which is roughly LRU */
	struct nf_conntrack_tuple_hash *h;
	struct nf_conn *ct = NULL, *tmp;
	struct hlist_nulls_node *n;
	unsigned int i, cnt = 0;
	int dropped = 0;

	rcu_read_lock();
	for (i = 0; i < net->ct.htable_size; i++) {
		hlist_nulls_for_each_entry_rcu(h, n, &net->ct.hash[hash],
					       hnnode) {
			tmp = nf_ct_tuplehash_to_ctrack(h);
			if (!test_bit(IPS_ASSURED_BIT, &tmp->status))
				ct = tmp;
			cnt++;
		}
		if (ct != NULL) {
			if (likely(!nf_ct_is_dying(ct) &&
				   atomic_inc_not_zero(&ct->ct_general.use)))
				break;
			else
				ct = NULL;
		}
		if (cnt >= NF_CT_EVICTION_RANGE)
			break;
		hash = (hash + 1) % net->ct.htable_size;
	}
	rcu_read_unlock();

	if (!ct)
		return dropped;

	if (del_timer(&ct->timeout)) {
		death_by_timeout((unsigned long)ct);
		/* Check if we indeed killed this entry. Reliable event
		   delivery may have inserted it into the dying list. */
		if (test_bit(IPS_DYING_BIT, &ct->status)) {
			dropped = 1;
			NF_CT_STAT_INC_ATOMIC(net, early_drop);
		}
	}
	nf_ct_put(ct);
	return dropped;
}

void init_nf_conntrack_hash_rnd(void)
{
	unsigned int rand;

	/*
	 * Why not initialize nf_conntrack_hash_rnd in an init() function?
	 * Because there isn't enough entropy when the system is initializing,
	 * so we initialize it as late as possible.
	 */
	do {
		get_random_bytes(&rand, sizeof(rand));
	} while (!rand);

	cmpxchg(&nf_conntrack_hash_rnd, 0, rand);
}

static struct nf_conn *
__nf_conntrack_alloc(struct net *net, u16 zone,
		     const struct nf_conntrack_tuple *orig,
		     const struct nf_conntrack_tuple *repl,
		     gfp_t gfp, u32 hash)
{
	struct nf_conn *ct;

	if (unlikely(!nf_conntrack_hash_rnd)) {
		init_nf_conntrack_hash_rnd();
		/* recompute the hash as nf_conntrack_hash_rnd is initialized */
		hash = hash_conntrack_raw(orig, zone);
	}

	/* We don't want any race condition at early drop stage */
	atomic_inc(&net->ct.count);

	if (nf_conntrack_max &&
	    unlikely(atomic_read(&net->ct.count) > nf_conntrack_max)) {
		if (!early_drop(net, hash_bucket(hash, net))) {
			atomic_dec(&net->ct.count);
			net_warn_ratelimited("nf_conntrack: table full, dropping packet\n");
			return ERR_PTR(-ENOMEM);
		}
	}

	/*
	 * Do not use kmem_cache_zalloc(), as this cache uses
	 * SLAB_DESTROY_BY_RCU.
	 */
	ct = kmem_cache_alloc(net->ct.nf_conntrack_cachep, gfp);
	if (ct == NULL) {
		atomic_dec(&net->ct.count);
		return ERR_PTR(-ENOMEM);
	}
	/*
	 * Let ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode.next
	 * and ct->tuplehash[IP_CT_DIR_REPLY].hnnode.next unchanged.
	 */
	memset(&ct->tuplehash[IP_CT_DIR_MAX], 0,
	       offsetof(struct nf_conn, proto) -
	       offsetof(struct nf_conn, tuplehash[IP_CT_DIR_MAX]));
	spin_lock_init(&ct->lock);
	ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple = *orig;
	ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode.pprev = NULL;
	ct->tuplehash[IP_CT_DIR_REPLY].tuple = *repl;
	/* save hash for reusing when confirming */
	*(unsigned long *)(&ct->tuplehash[IP_CT_DIR_REPLY].hnnode.pprev) = hash;
	/* Don't set timer yet: wait for confirmation */
	setup_timer(&ct->timeout, death_by_timeout, (unsigned long)ct);
	write_pnet(&ct->ct_net, net);
#ifdef CONFIG_NF_CONNTRACK_ZONES
	if (zone) {
		struct nf_conntrack_zone *nf_ct_zone;

		nf_ct_zone = nf_ct_ext_add(ct, NF_CT_EXT_ZONE, GFP_ATOMIC);
		if (!nf_ct_zone)
			goto out_free;
		nf_ct_zone->id = zone;
	}
#endif
	/*
	 * changes to lookup keys must be done before setting refcnt to 1
	 */
	smp_wmb();
	atomic_set(&ct->ct_general.use, 1);
	return ct;

#ifdef CONFIG_NF_CONNTRACK_ZONES
out_free:
	atomic_dec(&net->ct.count);
	kmem_cache_free(net->ct.nf_conntrack_cachep, ct);
	return ERR_PTR(-ENOMEM);
#endif
}

struct nf_conn *nf_conntrack_alloc(struct net *net, u16 zone,
				   const struct nf_conntrack_tuple *orig,
				   const struct nf_conntrack_tuple *repl,
				   gfp_t gfp)
{
	return __nf_conntrack_alloc(net, zone, orig, repl, gfp, 0);
}
EXPORT_SYMBOL_GPL(nf_conntrack_alloc);

void nf_conntrack_free(struct nf_conn *ct)
{
	struct net *net = nf_ct_net(ct);

	nf_ct_ext_destroy(ct);
	atomic_dec(&net->ct.count);
	nf_ct_ext_free(ct);
	kmem_cache_free(net->ct.nf_conntrack_cachep, ct);
}
EXPORT_SYMBOL_GPL(nf_conntrack_free);

/* Allocate a new conntrack: we return -ENOMEM if classification
   failed due to stress.  Otherwise it really is unclassifiable. */
static struct nf_conntrack_tuple_hash *
init_conntrack(struct net *net, struct nf_conn *tmpl,
	       const struct nf_conntrack_tuple *tuple,
	       struct nf_conntrack_l3proto *l3proto,
	       struct nf_conntrack_l4proto *l4proto,
	       struct sk_buff *skb,
	       unsigned int dataoff, u32 hash)
{
	struct nf_conn *ct;
	struct nf_conn_help *help;
	struct nf_conntrack_tuple repl_tuple;
	struct nf_conntrack_ecache *ecache;
	struct nf_conntrack_expect *exp;
	u16 zone = tmpl ? nf_ct_zone(tmpl) : NF_CT_DEFAULT_ZONE;
	struct nf_conn_timeout *timeout_ext;
	unsigned int *timeouts;

	if (!nf_ct_invert_tuple(&repl_tuple, tuple, l3proto, l4proto)) {
		pr_debug("Can't invert tuple.\n");
		return NULL;
	}

	ct = __nf_conntrack_alloc(net, zone, tuple, &repl_tuple, GFP_ATOMIC,
				  hash);
	if (IS_ERR(ct))
		return (struct nf_conntrack_tuple_hash *)ct;

	timeout_ext = tmpl ? nf_ct_timeout_find(tmpl) : NULL;
	if (timeout_ext)
		timeouts = NF_CT_TIMEOUT_EXT_DATA(timeout_ext);
	else
		timeouts = l4proto->get_timeouts(net);

	if (!l4proto->new(ct, skb, dataoff, timeouts)) {
		nf_conntrack_free(ct);
		pr_debug("init conntrack: can't track with proto module\n");
		return NULL;
	}

	if (timeout_ext)
		nf_ct_timeout_ext_add(ct, timeout_ext->timeout, GFP_ATOMIC);

	nf_ct_acct_ext_add(ct, GFP_ATOMIC);
	nf_ct_tstamp_ext_add(ct, GFP_ATOMIC);

	ecache = tmpl ? nf_ct_ecache_find(tmpl) : NULL;
	nf_ct_ecache_ext_add(ct, ecache ? ecache->ctmask : 0,
			     ecache ? ecache->expmask : 0,
			     GFP_ATOMIC);

	spin_lock_bh(&nf_conntrack_lock);
	exp = nf_ct_find_expectation(net, zone, tuple);
	if (exp) {
		pr_debug("conntrack: expectation arrives ct=%p exp=%p\n",
			 ct, exp);
		/* Welcome, Mr. Bond.  We've been expecting you... */
		__set_bit(IPS_EXPECTED_BIT, &ct->status);
		ct->master = exp->master;
		if (exp->helper) {
			help = nf_ct_helper_ext_add(ct, exp->helper,
						    GFP_ATOMIC);
			if (help)
				rcu_assign_pointer(help->helper, exp->helper);
		}

#ifdef CONFIG_NF_CONNTRACK_MARK
		ct->mark = exp->master->mark;
#endif
#ifdef CONFIG_NF_CONNTRACK_SECMARK
		ct->secmark = exp->master->secmark;
#endif
		nf_conntrack_get(&ct->master->ct_general);
		NF_CT_STAT_INC(net, expect_new);
	} else {
		__nf_ct_try_assign_helper(ct, tmpl, GFP_ATOMIC);
		NF_CT_STAT_INC(net, new);
	}

	/* Overload tuple linked list to put us in unconfirmed list. */
	hlist_nulls_add_head_rcu(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode,
				 &net->ct.unconfirmed);

	spin_unlock_bh(&nf_conntrack_lock);

	if (exp) {
		if (exp->expectfn)
			exp->expectfn(ct, exp);
		nf_ct_expect_put(exp);
	}

	return &ct->tuplehash[IP_CT_DIR_ORIGINAL];
}

/* On success, returns conntrack ptr, sets skb->nfct and ctinfo */
static inline struct nf_conn *
resolve_normal_ct(struct net *net, struct nf_conn *tmpl,
		  struct sk_buff *skb,
		  unsigned int dataoff,
		  u_int16_t l3num,
		  u_int8_t protonum,
		  struct nf_conntrack_l3proto *l3proto,
		  struct nf_conntrack_l4proto *l4proto,
		  int *set_reply,
		  enum ip_conntrack_info *ctinfo)
{
	struct nf_conntrack_tuple tuple;
	struct nf_conntrack_tuple_hash *h;
	struct nf_conn *ct;
	u16 zone = tmpl ? nf_ct_zone(tmpl) : NF_CT_DEFAULT_ZONE;
	u32 hash;

	if (!nf_ct_get_tuple(skb, skb_network_offset(skb),
			     dataoff, l3num, protonum, &tuple, l3proto,
			     l4proto)) {
		pr_debug("resolve_normal_ct: Can't get tuple\n");
		return NULL;
	}

	/* look for tuple match */
	hash = hash_conntrack_raw(&tuple, zone);
	h = __nf_conntrack_find_get(net, zone, &tuple, hash);
	if (!h) {
		h = init_conntrack(net, tmpl, &tuple, l3proto, l4proto,
				   skb, dataoff, hash);
		if (!h)
			return NULL;
		if (IS_ERR(h))
			return (void *)h;
	}
	ct = nf_ct_tuplehash_to_ctrack(h);

	/* It exists; we have (non-exclusive) reference. */
	if (NF_CT_DIRECTION(h) == IP_CT_DIR_REPLY) {
		*ctinfo = IP_CT_ESTABLISHED_REPLY;
		/* Please set reply bit if this packet OK */
		*set_reply = 1;
	} else {
		/* Once we've had two way comms, always ESTABLISHED. */
		if (test_bit(IPS_SEEN_REPLY_BIT, &ct->status)) {
			pr_debug("nf_conntrack_in: normal packet for %p\n", ct);
			*ctinfo = IP_CT_ESTABLISHED;
		} else if (test_bit(IPS_EXPECTED_BIT, &ct->status)) {
			pr_debug("nf_conntrack_in: related packet for %p\n",
				 ct);
			*ctinfo = IP_CT_RELATED;
		} else {
			pr_debug("nf_conntrack_in: new packet for %p\n", ct);
			*ctinfo = IP_CT_NEW;
		}
		*set_reply = 0;
	}
	skb->nfct = &ct->ct_general;
	skb->nfctinfo = *ctinfo;
	return ct;
}

unsigned int
nf_conntrack_in(struct net *net, u_int8_t pf, unsigned int hooknum,
		struct sk_buff *skb)
{
	struct nf_conn *ct, *tmpl = NULL;
	enum ip_conntrack_info ctinfo;
	struct nf_conntrack_l3proto *l3proto;
	struct nf_conntrack_l4proto *l4proto;
	unsigned int *timeouts;
	unsigned int dataoff;
	u_int8_t protonum;
	int set_reply = 0;
	int ret;

	if (skb->nfct) {
		/* Previously seen (loopback or untracked)?  Ignore. */
		tmpl = (struct nf_conn *)skb->nfct;
		if (!nf_ct_is_template(tmpl)) {
			NF_CT_STAT_INC_ATOMIC(net, ignore);
			return NF_ACCEPT;
		}
		skb->nfct = NULL;
	}

	/* rcu_read_lock()ed by nf_hook_slow */
	l3proto = __nf_ct_l3proto_find(pf);
	ret = l3proto->get_l4proto(skb, skb_network_offset(skb),
				   &dataoff, &protonum);
	if (ret <= 0) {
		pr_debug("not prepared to track yet or error occurred\n");
		NF_CT_STAT_INC_ATOMIC(net, error);
		NF_CT_STAT_INC_ATOMIC(net, invalid);
		ret = -ret;
		goto out;
	}

	l4proto = __nf_ct_l4proto_find(pf, protonum);

	/* It may be a special packet, error, unclean...
	 * inverse of the return code tells the netfilter
	 * core what to do with the packet. */
	if (l4proto->error != NULL) {
		ret = l4proto->error(net, tmpl, skb, dataoff, &ctinfo,
				     pf, hooknum);
		if (ret <= 0) {
			NF_CT_STAT_INC_ATOMIC(net, error);
			NF_CT_STAT_INC_ATOMIC(net, invalid);
			ret = -ret;
			goto out;
		}
		/* ICMP[v6] protocol trackers may assign one conntrack. */
		if (skb->nfct)
			goto out;
	}

	ct = resolve_normal_ct(net, tmpl, skb, dataoff, pf, protonum,
			       l3proto, l4proto, &set_reply, &ctinfo);
	if (!ct) {
		/* Not valid part of a connection */
		NF_CT_STAT_INC_ATOMIC(net, invalid);
		ret = NF_ACCEPT;
		goto out;
	}

	if (IS_ERR(ct)) {
		/* Too stressed to deal. */
		NF_CT_STAT_INC_ATOMIC(net, drop);
		ret = NF_DROP;
		goto out;
	}

	NF_CT_ASSERT(skb->nfct);

	/* Decide what timeout policy we want to apply to this flow. */
	timeouts = nf_ct_timeout_lookup(net, ct, l4proto);

	ret = l4proto->packet(ct, skb, dataoff, ctinfo, pf, hooknum, timeouts);
	if (ret <= 0) {
		/* Invalid: inverse of the return code tells
		 * the netfilter core what to do */
		pr_debug("nf_conntrack_in: Can't track with proto module\n");
		nf_conntrack_put(skb->nfct);
		skb->nfct = NULL;
		NF_CT_STAT_INC_ATOMIC(net, invalid);
		if (ret == -NF_DROP)
			NF_CT_STAT_INC_ATOMIC(net, drop);
		ret = -ret;
		goto out;
	}

	if (set_reply && !test_and_set_bit(IPS_SEEN_REPLY_BIT, &ct->status))
		nf_conntrack_event_cache(IPCT_REPLY, ct);
out:
	if (tmpl) {
		/* Special case: we have to repeat this hook, assign the
		 * template again to this packet. We assume that this packet
		 * has no conntrack assigned. This is used by nf_ct_tcp. */
		if (ret == NF_REPEAT)
			skb->nfct = (struct nf_conntrack *)tmpl;
		else
			nf_ct_put(tmpl);
	}

	return ret;
}
EXPORT_SYMBOL_GPL(nf_conntrack_in);
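
/* Convenience wrapper around nf_ct_invert_tuple() that looks up the l3/l4
 * protocol handlers for the original tuple under rcu_read_lock().
 */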
bool nf_ct_invert_tuplepr(struct nf_conntrack_tuple *inverse,
			  const struct nf_conntrack_tuple *orig)
{
	bool ret;

	rcu_read_lock();
	ret = nf_ct_invert_tuple(inverse, orig,
				 __nf_ct_l3proto_find(orig->src.l3num),
				 __nf_ct_l4proto_find(orig->src.l3num,
						      orig->dst.protonum));
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(nf_ct_invert_tuplepr);

/* Alter reply tuple (maybe alter helper).  This is for NAT, and is
   implicitly racy: see __nf_conntrack_confirm */
void nf_conntrack_alter_reply(struct nf_conn *ct,
			      const struct nf_conntrack_tuple *newreply)
{
	struct nf_conn_help *help = nfct_help(ct);

	/* Should be unconfirmed, so not in hash table yet */
	NF_CT_ASSERT(!nf_ct_is_confirmed(ct));

	pr_debug("Altering reply tuple of %p to ", ct);
	nf_ct_dump_tuple(newreply);

	ct->tuplehash[IP_CT_DIR_REPLY].tuple = *newreply;
	if (ct->master || (help && !hlist_empty(&help->expectations)))
		return;

	rcu_read_lock();
	__nf_ct_try_assign_helper(ct, NULL, GFP_ATOMIC);
	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(nf_conntrack_alter_reply);

/* Refresh conntrack for this many jiffies and do accounting if do_acct is 1 */
void __nf_ct_refresh_acct(struct nf_conn *ct,
			  enum ip_conntrack_info ctinfo,
			  const struct sk_buff *skb,
			  unsigned long extra_jiffies,
			  int do_acct)
{
	NF_CT_ASSERT(ct->timeout.data == (unsigned long)ct);
	NF_CT_ASSERT(skb);

	/* Only update if this is not a fixed timeout */
	if (test_bit(IPS_FIXED_TIMEOUT_BIT, &ct->status))
		goto acct;

	/* If not in hash table, timer will not be active yet */
	if (!nf_ct_is_confirmed(ct)) {
		ct->timeout.expires = extra_jiffies;
	} else {
		unsigned long newtime = jiffies + extra_jiffies;

		/* Only update the timeout if the new timeout is at least
		   HZ jiffies from the old timeout. Need del_timer for race
		   avoidance (may already be dying). */
		if (newtime - ct->timeout.expires >= HZ)
			mod_timer_pending(&ct->timeout, newtime);
	}

acct:
	if (do_acct) {
		struct nf_conn_counter *acct;

		acct = nf_conn_acct_find(ct);
		if (acct) {
			atomic64_inc(&acct[CTINFO2DIR(ctinfo)].packets);
			atomic64_add(skb->len, &acct[CTINFO2DIR(ctinfo)].bytes);
		}
	}
}
EXPORT_SYMBOL_GPL(__nf_ct_refresh_acct);

bool __nf_ct_kill_acct(struct nf_conn *ct,
		       enum ip_conntrack_info ctinfo,
		       const struct sk_buff *skb,
		       int do_acct)
{
	if (do_acct) {
		struct nf_conn_counter *acct;

		acct = nf_conn_acct_find(ct);
		if (acct) {
			atomic64_inc(&acct[CTINFO2DIR(ctinfo)].packets);
			atomic64_add(skb->len - skb_network_offset(skb),
				     &acct[CTINFO2DIR(ctinfo)].bytes);
		}
	}

	if (del_timer(&ct->timeout)) {
		ct->timeout.function((unsigned long)ct);
		return true;
	}
	return false;
}
EXPORT_SYMBOL_GPL(__nf_ct_kill_acct);
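
/* Extension type describing the per-conntrack zone id; registered only when
 * CONFIG_NF_CONNTRACK_ZONES is enabled.
 */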
#ifdef CONFIG_NF_CONNTRACK_ZONES
static struct nf_ct_ext_type nf_ct_zone_extend __read_mostly = {
	.len	= sizeof(struct nf_conntrack_zone),
	.align	= __alignof__(struct nf_conntrack_zone),
	.id	= NF_CT_EXT_ZONE,
};
#endif

#if IS_ENABLED(CONFIG_NF_CT_NETLINK)

#include <linux/netfilter/nfnetlink.h>
#include <linux/netfilter/nfnetlink_conntrack.h>
#include <linux/mutex.h>

/* Generic function for tcp/udp/sctp/dccp and alike. This needs to be
 * in ip_conntrack_core, since we don't want the protocols to autoload
 * or depend on ctnetlink */
int nf_ct_port_tuple_to_nlattr(struct sk_buff *skb,
			       const struct nf_conntrack_tuple *tuple)
{
	if (nla_put_be16(skb, CTA_PROTO_SRC_PORT, tuple->src.u.tcp.port) ||
	    nla_put_be16(skb, CTA_PROTO_DST_PORT, tuple->dst.u.tcp.port))
		goto nla_put_failure;
	return 0;

nla_put_failure:
	return -1;
}
EXPORT_SYMBOL_GPL(nf_ct_port_tuple_to_nlattr);

const struct nla_policy nf_ct_port_nla_policy[CTA_PROTO_MAX+1] = {
	[CTA_PROTO_SRC_PORT]  = { .type = NLA_U16 },
	[CTA_PROTO_DST_PORT]  = { .type = NLA_U16 },
};
EXPORT_SYMBOL_GPL(nf_ct_port_nla_policy);

int nf_ct_port_nlattr_to_tuple(struct nlattr *tb[],
			       struct nf_conntrack_tuple *t)
{
	if (!tb[CTA_PROTO_SRC_PORT] || !tb[CTA_PROTO_DST_PORT])
		return -EINVAL;

	t->src.u.tcp.port = nla_get_be16(tb[CTA_PROTO_SRC_PORT]);
	t->dst.u.tcp.port = nla_get_be16(tb[CTA_PROTO_DST_PORT]);

	return 0;
}
EXPORT_SYMBOL_GPL(nf_ct_port_nlattr_to_tuple);

int nf_ct_port_nlattr_tuple_size(void)
{
	return nla_policy_len(nf_ct_port_nla_policy, CTA_PROTO_MAX + 1);
}
EXPORT_SYMBOL_GPL(nf_ct_port_nlattr_tuple_size);
#endif

/* Used by ipt_REJECT and ip6t_REJECT. */
static void nf_conntrack_attach(struct sk_buff *nskb, struct sk_buff *skb)
{
	struct nf_conn *ct;
	enum ip_conntrack_info ctinfo;

	/* This ICMP is in reverse direction to the packet which caused it */
	ct = nf_ct_get(skb, &ctinfo);
	if (CTINFO2DIR(ctinfo) == IP_CT_DIR_ORIGINAL)
		ctinfo = IP_CT_RELATED_REPLY;
	else
		ctinfo = IP_CT_RELATED;

	/* Attach to new skbuff, and increment count */
	nskb->nfct = &ct->ct_general;
	nskb->nfctinfo = ctinfo;
	nf_conntrack_get(nskb->nfct);
}

/* Bring out ya dead! */
static struct nf_conn *
get_next_corpse(struct net *net, int (*iter)(struct nf_conn *i, void *data),
		void *data, unsigned int *bucket)
{
	struct nf_conntrack_tuple_hash *h;
	struct nf_conn *ct;
	struct hlist_nulls_node *n;

	spin_lock_bh(&nf_conntrack_lock);
	for (; *bucket < net->ct.htable_size; (*bucket)++) {
		hlist_nulls_for_each_entry(h, n, &net->ct.hash[*bucket], hnnode) {
			ct = nf_ct_tuplehash_to_ctrack(h);
			if (iter(ct, data))
				goto found;
		}
	}
	hlist_nulls_for_each_entry(h, n, &net->ct.unconfirmed, hnnode) {
		ct = nf_ct_tuplehash_to_ctrack(h);
		if (iter(ct, data))
			set_bit(IPS_DYING_BIT, &ct->status);
	}
	spin_unlock_bh(&nf_conntrack_lock);
	return NULL;
found:
	atomic_inc(&ct->ct_general.use);
	spin_unlock_bh(&nf_conntrack_lock);
	return ct;
}

void nf_ct_iterate_cleanup(struct net *net,
			   int (*iter)(struct nf_conn *i, void *data),
			   void *data)
{
	struct nf_conn *ct;
	unsigned int bucket = 0;

	while ((ct = get_next_corpse(net, iter, data, &bucket)) != NULL) {
		/* Time to push up daisies... */
		if (del_timer(&ct->timeout))
			death_by_timeout((unsigned long)ct);
		/* ... else the timer will get him soon. */

		nf_ct_put(ct);
	}
}
EXPORT_SYMBOL_GPL(nf_ct_iterate_cleanup);

struct __nf_ct_flush_report {
	u32 pid;
	int report;
};

static int kill_report(struct nf_conn *i, void *data)
{
	struct __nf_ct_flush_report *fr = (struct __nf_ct_flush_report *)data;
	struct nf_conn_tstamp *tstamp;

	tstamp = nf_conn_tstamp_find(i);
	if (tstamp && tstamp->stop == 0)
		tstamp->stop = ktime_to_ns(ktime_get_real());

	/* If we fail to deliver the event, death_by_timeout() will retry */
	if (nf_conntrack_event_report(IPCT_DESTROY, i,
				      fr->pid, fr->report) < 0)
		return 0;

	/* Avoid the delivery of the destroy event in death_by_timeout(). */
	set_bit(IPS_DYING_BIT, &i->status);
	return 1;
}

static int kill_all(struct nf_conn *i, void *data)
{
	return 1;
}
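
/* Free a table allocated by nf_ct_alloc_hashtable(); it may have come from
 * either vmalloc() or the page allocator, so pick the matching free routine.
 */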
void nf_ct_free_hashtable(void *hash, unsigned int size)
{
	if (is_vmalloc_addr(hash))
		vfree(hash);
	else
		free_pages((unsigned long)hash,
			   get_order(sizeof(struct hlist_head) * size));
}
EXPORT_SYMBOL_GPL(nf_ct_free_hashtable);

void nf_conntrack_flush_report(struct net *net, u32 pid, int report)
{
	struct __nf_ct_flush_report fr = {
		.pid	= pid,
		.report = report,
	};
	nf_ct_iterate_cleanup(net, kill_report, &fr);
}
EXPORT_SYMBOL_GPL(nf_conntrack_flush_report);

static void nf_ct_release_dying_list(struct net *net)
{
	struct nf_conntrack_tuple_hash *h;
	struct nf_conn *ct;
	struct hlist_nulls_node *n;

	spin_lock_bh(&nf_conntrack_lock);
	hlist_nulls_for_each_entry(h, n, &net->ct.dying, hnnode) {
		ct = nf_ct_tuplehash_to_ctrack(h);
		/* never fails to remove them, no listeners at this point */
		nf_ct_kill(ct);
	}
	spin_unlock_bh(&nf_conntrack_lock);
}

static int untrack_refs(void)
{
	int cnt = 0, cpu;

	for_each_possible_cpu(cpu) {
		struct nf_conn *ct = &per_cpu(nf_conntrack_untracked, cpu);

		cnt += atomic_read(&ct->ct_general.use) - 1;
	}
	return cnt;
}

static void nf_conntrack_cleanup_init_net(void)
{
	while (untrack_refs() > 0)
		schedule();

#ifdef CONFIG_NF_CONNTRACK_ZONES
	nf_ct_extend_unregister(&nf_ct_zone_extend);
#endif
}

static void nf_conntrack_cleanup_net(struct net *net)
{
 i_see_dead_people:
	nf_ct_iterate_cleanup(net, kill_all, NULL);
	nf_ct_release_dying_list(net);
	if (atomic_read(&net->ct.count) != 0) {
		schedule();
		goto i_see_dead_people;
	}

	nf_ct_free_hashtable(net->ct.hash, net->ct.htable_size);
	nf_conntrack_helper_fini(net);
	nf_conntrack_timeout_fini(net);
	nf_conntrack_ecache_fini(net);
	nf_conntrack_tstamp_fini(net);
	nf_conntrack_acct_fini(net);
	nf_conntrack_expect_fini(net);
	kmem_cache_destroy(net->ct.nf_conntrack_cachep);
	kfree(net->ct.slabname);
	free_percpu(net->ct.stat);
}

/* Mishearing the voices in his head, our hero wonders how he's
   supposed to kill the mall. */
void nf_conntrack_cleanup(struct net *net)
{
	if (net_eq(net, &init_net))
		RCU_INIT_POINTER(ip_ct_attach, NULL);

	/* This makes sure all current packets have passed through
	   netfilter framework.  Roll on, two-stage module
	   delete... */
	synchronize_net();
	nf_conntrack_proto_fini(net);
	nf_conntrack_cleanup_net(net);

	if (net_eq(net, &init_net)) {
		RCU_INIT_POINTER(nf_ct_destroy, NULL);
		nf_conntrack_cleanup_init_net();
	}
}

void *nf_ct_alloc_hashtable(unsigned int *sizep, int nulls)
{
	struct hlist_nulls_head *hash;
	unsigned int nr_slots, i;
	size_t sz;

	BUILD_BUG_ON(sizeof(struct hlist_nulls_head) != sizeof(struct hlist_head));
	nr_slots = *sizep = roundup(*sizep, PAGE_SIZE / sizeof(struct hlist_nulls_head));
	sz = nr_slots * sizeof(struct hlist_nulls_head);
	hash = (void *)__get_free_pages(GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO,
					get_order(sz));
	if (!hash) {
		printk(KERN_WARNING "nf_conntrack: falling back to vmalloc.\n");
		hash = vzalloc(sz);
	}

	if (hash && nulls)
		for (i = 0; i < nr_slots; i++)
			INIT_HLIST_NULLS_HEAD(&hash[i], i);

	return hash;
}
EXPORT_SYMBOL_GPL(nf_ct_alloc_hashtable);
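
/* Handler for writes to the "hashsize" module parameter: allocate a new
 * table, rehash every confirmed entry into it and release the old one.
 * Only the initial network namespace may do this.
 */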
int nf_conntrack_set_hashsize(const char *val, struct kernel_param *kp)
{
	int i, bucket;
	unsigned int hashsize, old_size;
	struct hlist_nulls_head *hash, *old_hash;
	struct nf_conntrack_tuple_hash *h;
	struct nf_conn *ct;

	if (current->nsproxy->net_ns != &init_net)
		return -EOPNOTSUPP;

	/* On boot, we can set this without any fancy locking. */
	if (!nf_conntrack_htable_size)
		return param_set_uint(val, kp);

	hashsize = simple_strtoul(val, NULL, 0);
	if (!hashsize)
		return -EINVAL;

	hash = nf_ct_alloc_hashtable(&hashsize, 1);
	if (!hash)
		return -ENOMEM;

	/* Lookups in the old hash might happen in parallel, which means we
	 * might get false negatives during connection lookup. New connections
	 * created because of a false negative won't make it into the hash
	 * though since that required taking the lock.
	 */
	spin_lock_bh(&nf_conntrack_lock);
	for (i = 0; i < init_net.ct.htable_size; i++) {
		while (!hlist_nulls_empty(&init_net.ct.hash[i])) {
			h = hlist_nulls_entry(init_net.ct.hash[i].first,
					      struct nf_conntrack_tuple_hash, hnnode);
			ct = nf_ct_tuplehash_to_ctrack(h);
			hlist_nulls_del_rcu(&h->hnnode);
			bucket = __hash_conntrack(&h->tuple, nf_ct_zone(ct),
						  hashsize);
			hlist_nulls_add_head_rcu(&h->hnnode, &hash[bucket]);
		}
	}
	old_size = init_net.ct.htable_size;
	old_hash = init_net.ct.hash;

	init_net.ct.htable_size = nf_conntrack_htable_size = hashsize;
	init_net.ct.hash = hash;
	spin_unlock_bh(&nf_conntrack_lock);

	nf_ct_free_hashtable(old_hash, old_size);
	return 0;
}
EXPORT_SYMBOL_GPL(nf_conntrack_set_hashsize);

module_param_call(hashsize, nf_conntrack_set_hashsize, param_get_uint,
		  &nf_conntrack_htable_size, 0600);

void nf_ct_untracked_status_or(unsigned long bits)
{
	int cpu;

	for_each_possible_cpu(cpu)
		per_cpu(nf_conntrack_untracked, cpu).status |= bits;
}
EXPORT_SYMBOL_GPL(nf_ct_untracked_status_or);
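
/* One-time initialisation shared by all namespaces: size the hash table,
 * register the zone extension and set up the per-cpu "untracked" conntracks.
 */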
static int nf_conntrack_init_init_net(void)
{
	int max_factor = 8;
	int ret, cpu;

	/* Idea from tcp.c: use 1/16384 of memory.  On i386: 32MB
	 * machine has 512 buckets. >= 1GB machines have 16384 buckets. */
	if (!nf_conntrack_htable_size) {
		nf_conntrack_htable_size
			= (((totalram_pages << PAGE_SHIFT) / 16384)
			   / sizeof(struct hlist_head));
		if (totalram_pages > (1024 * 1024 * 1024 / PAGE_SIZE))
			nf_conntrack_htable_size = 16384;
		if (nf_conntrack_htable_size < 32)
			nf_conntrack_htable_size = 32;

		/* Use a max. factor of four by default to get the same max as
		 * with the old struct list_heads. When a table size is given
		 * we use the old value of 8 to avoid reducing the max.
		 * entries. */
		max_factor = 4;
	}
	nf_conntrack_max = max_factor * nf_conntrack_htable_size;

	printk(KERN_INFO "nf_conntrack version %s (%u buckets, %d max)\n",
	       NF_CONNTRACK_VERSION, nf_conntrack_htable_size,
	       nf_conntrack_max);
#ifdef CONFIG_NF_CONNTRACK_ZONES
	ret = nf_ct_extend_register(&nf_ct_zone_extend);
	if (ret < 0)
		goto err_extend;
#endif
	/* Set up fake conntrack: to never be deleted, not in any hashes */
	for_each_possible_cpu(cpu) {
		struct nf_conn *ct = &per_cpu(nf_conntrack_untracked, cpu);
		write_pnet(&ct->ct_net, &init_net);
		atomic_set(&ct->ct_general.use, 1);
	}
	/*  - and make it look like a confirmed connection */
	nf_ct_untracked_status_or(IPS_CONFIRMED | IPS_UNTRACKED);
	return 0;

#ifdef CONFIG_NF_CONNTRACK_ZONES
err_extend:
#endif
	return ret;
}

/*
 * We need to use special "null" values, not used in hash table
 */
#define UNCONFIRMED_NULLS_VAL	((1<<30)+0)
#define DYING_NULLS_VAL		((1<<30)+1)

static int nf_conntrack_init_net(struct net *net)
{
	int ret;

	atomic_set(&net->ct.count, 0);
	INIT_HLIST_NULLS_HEAD(&net->ct.unconfirmed, UNCONFIRMED_NULLS_VAL);
	INIT_HLIST_NULLS_HEAD(&net->ct.dying, DYING_NULLS_VAL);
	net->ct.stat = alloc_percpu(struct ip_conntrack_stat);
	if (!net->ct.stat) {
		ret = -ENOMEM;
		goto err_stat;
	}

	net->ct.slabname = kasprintf(GFP_KERNEL, "nf_conntrack_%p", net);
	if (!net->ct.slabname) {
		ret = -ENOMEM;
		goto err_slabname;
	}

	net->ct.nf_conntrack_cachep = kmem_cache_create(net->ct.slabname,
							sizeof(struct nf_conn), 0,
							SLAB_DESTROY_BY_RCU, NULL);
	if (!net->ct.nf_conntrack_cachep) {
		printk(KERN_ERR "Unable to create nf_conn slab cache\n");
		ret = -ENOMEM;
		goto err_cache;
	}

	net->ct.htable_size = nf_conntrack_htable_size;
	net->ct.hash = nf_ct_alloc_hashtable(&net->ct.htable_size, 1);
	if (!net->ct.hash) {
		ret = -ENOMEM;
		printk(KERN_ERR "Unable to create nf_conntrack_hash\n");
		goto err_hash;
	}
	ret = nf_conntrack_expect_init(net);
	if (ret < 0)
		goto err_expect;
	ret = nf_conntrack_acct_init(net);
	if (ret < 0)
		goto err_acct;
	ret = nf_conntrack_tstamp_init(net);
	if (ret < 0)
		goto err_tstamp;
	ret = nf_conntrack_ecache_init(net);
	if (ret < 0)
		goto err_ecache;
	ret = nf_conntrack_timeout_init(net);
	if (ret < 0)
		goto err_timeout;
	ret = nf_conntrack_helper_init(net);
	if (ret < 0)
		goto err_helper;
	return 0;
err_helper:
	nf_conntrack_timeout_fini(net);
err_timeout:
	nf_conntrack_ecache_fini(net);
err_ecache:
	nf_conntrack_tstamp_fini(net);
err_tstamp:
	nf_conntrack_acct_fini(net);
err_acct:
	nf_conntrack_expect_fini(net);
err_expect:
	nf_ct_free_hashtable(net->ct.hash, net->ct.htable_size);
err_hash:
	kmem_cache_destroy(net->ct.nf_conntrack_cachep);
err_cache:
	kfree(net->ct.slabname);
err_slabname:
	free_percpu(net->ct.stat);
err_stat:
	return ret;
}

s16 (*nf_ct_nat_offset)(const struct nf_conn *ct,
			enum ip_conntrack_dir dir,
			u32 seq);
EXPORT_SYMBOL_GPL(nf_ct_nat_offset);

int nf_conntrack_init(struct net *net)
{
	int ret;

	if (net_eq(net, &init_net)) {
		ret = nf_conntrack_init_init_net();
		if (ret < 0)
			goto out_init_net;
	}
	ret = nf_conntrack_proto_init(net);
	if (ret < 0)
		goto out_proto;
	ret = nf_conntrack_init_net(net);
	if (ret < 0)
		goto out_net;

	if (net_eq(net, &init_net)) {
		/* For use by REJECT target */
		RCU_INIT_POINTER(ip_ct_attach, nf_conntrack_attach);
		RCU_INIT_POINTER(nf_ct_destroy, destroy_conntrack);

		/* Howto get NAT offsets */
		RCU_INIT_POINTER(nf_ct_nat_offset, NULL);
	}
	return 0;

out_net:
	nf_conntrack_proto_fini(net);
out_proto:
	if (net_eq(net, &init_net))
		nf_conntrack_cleanup_init_net();
out_init_net:
	return ret;
}