1 /* Connection state tracking for netfilter.  This is separated from,
2    but required by, the NAT layer; it can also be used by an iptables
3    extension. */
4
5 /* (C) 1999-2001 Paul `Rusty' Russell
6  * (C) 2002-2006 Netfilter Core Team <coreteam@netfilter.org>
7  * (C) 2003,2004 USAGI/WIDE Project <http://www.linux-ipv6.org>
8  *
9  * This program is free software; you can redistribute it and/or modify
10  * it under the terms of the GNU General Public License version 2 as
11  * published by the Free Software Foundation.
12  */
13
14 #include <linux/types.h>
15 #include <linux/netfilter.h>
16 #include <linux/module.h>
17 #include <linux/sched.h>
18 #include <linux/skbuff.h>
19 #include <linux/proc_fs.h>
20 #include <linux/vmalloc.h>
21 #include <linux/stddef.h>
22 #include <linux/slab.h>
23 #include <linux/random.h>
24 #include <linux/jhash.h>
25 #include <linux/err.h>
26 #include <linux/percpu.h>
27 #include <linux/moduleparam.h>
28 #include <linux/notifier.h>
29 #include <linux/kernel.h>
30 #include <linux/netdevice.h>
31 #include <linux/socket.h>
32 #include <linux/mm.h>
33 #include <linux/nsproxy.h>
34 #include <linux/rculist_nulls.h>
35
36 #include <net/netfilter/nf_conntrack.h>
37 #include <net/netfilter/nf_conntrack_l3proto.h>
38 #include <net/netfilter/nf_conntrack_l4proto.h>
39 #include <net/netfilter/nf_conntrack_expect.h>
40 #include <net/netfilter/nf_conntrack_helper.h>
41 #include <net/netfilter/nf_conntrack_core.h>
42 #include <net/netfilter/nf_conntrack_extend.h>
43 #include <net/netfilter/nf_conntrack_acct.h>
44 #include <net/netfilter/nf_conntrack_ecache.h>
45 #include <net/netfilter/nf_conntrack_zones.h>
46 #include <net/netfilter/nf_conntrack_timestamp.h>
47 #include <net/netfilter/nf_conntrack_timeout.h>
48 #include <net/netfilter/nf_nat.h>
49 #include <net/netfilter/nf_nat_core.h>
50
51 #define NF_CONNTRACK_VERSION    "0.5.0"
52
53 int (*nfnetlink_parse_nat_setup_hook)(struct nf_conn *ct,
54                                       enum nf_nat_manip_type manip,
55                                       const struct nlattr *attr) __read_mostly;
56 EXPORT_SYMBOL_GPL(nfnetlink_parse_nat_setup_hook);
57
58 DEFINE_SPINLOCK(nf_conntrack_lock);
59 EXPORT_SYMBOL_GPL(nf_conntrack_lock);
60
61 unsigned int nf_conntrack_htable_size __read_mostly;
62 EXPORT_SYMBOL_GPL(nf_conntrack_htable_size);
63
64 unsigned int nf_conntrack_max __read_mostly;
65 EXPORT_SYMBOL_GPL(nf_conntrack_max);
66
67 DEFINE_PER_CPU(struct nf_conn, nf_conntrack_untracked);
68 EXPORT_PER_CPU_SYMBOL(nf_conntrack_untracked);
69
70 unsigned int nf_conntrack_hash_rnd __read_mostly;
71 EXPORT_SYMBOL_GPL(nf_conntrack_hash_rnd);
72
73 static u32 hash_conntrack_raw(const struct nf_conntrack_tuple *tuple, u16 zone)
74 {
75         unsigned int n;
76
77         /* The direction must be ignored, so we hash everything up to the
78          * destination ports (which is a multiple of 4) and treat the last
79          * three bytes manually.
80          */
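        /* The "last three bytes" are the 16-bit destination port and the
         * 8-bit protocol number; they are folded into the jhash seed below
         * together with the zone and the per-boot random value.
         */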
81         n = (sizeof(tuple->src) + sizeof(tuple->dst.u3)) / sizeof(u32);
82         return jhash2((u32 *)tuple, n, zone ^ nf_conntrack_hash_rnd ^
83                       (((__force __u16)tuple->dst.u.all << 16) |
84                       tuple->dst.protonum));
85 }
86
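/* Reduce a 32-bit hash value to a bucket index in [0, size) with a
 * multiply-and-shift instead of a modulo.
 */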
87 static u32 __hash_bucket(u32 hash, unsigned int size)
88 {
89         return ((u64)hash * size) >> 32;
90 }
91
92 static u32 hash_bucket(u32 hash, const struct net *net)
93 {
94         return __hash_bucket(hash, net->ct.htable_size);
95 }
96
97 static u_int32_t __hash_conntrack(const struct nf_conntrack_tuple *tuple,
98                                   u16 zone, unsigned int size)
99 {
100         return __hash_bucket(hash_conntrack_raw(tuple, zone), size);
101 }
102
103 static inline u_int32_t hash_conntrack(const struct net *net, u16 zone,
104                                        const struct nf_conntrack_tuple *tuple)
105 {
106         return __hash_conntrack(tuple, zone, net->ct.htable_size);
107 }
108
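/* Fill in the ORIGINAL-direction tuple for a packet, delegating address
 * extraction to the L3 protocol helper and the protocol-specific fields
 * (e.g. ports) to the L4 protocol helper.
 */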
109 bool
110 nf_ct_get_tuple(const struct sk_buff *skb,
111                 unsigned int nhoff,
112                 unsigned int dataoff,
113                 u_int16_t l3num,
114                 u_int8_t protonum,
115                 struct nf_conntrack_tuple *tuple,
116                 const struct nf_conntrack_l3proto *l3proto,
117                 const struct nf_conntrack_l4proto *l4proto)
118 {
119         memset(tuple, 0, sizeof(*tuple));
120
121         tuple->src.l3num = l3num;
122         if (l3proto->pkt_to_tuple(skb, nhoff, tuple) == 0)
123                 return false;
124
125         tuple->dst.protonum = protonum;
126         tuple->dst.dir = IP_CT_DIR_ORIGINAL;
127
128         return l4proto->pkt_to_tuple(skb, dataoff, tuple);
129 }
130 EXPORT_SYMBOL_GPL(nf_ct_get_tuple);
131
132 bool nf_ct_get_tuplepr(const struct sk_buff *skb, unsigned int nhoff,
133                        u_int16_t l3num, struct nf_conntrack_tuple *tuple)
134 {
135         struct nf_conntrack_l3proto *l3proto;
136         struct nf_conntrack_l4proto *l4proto;
137         unsigned int protoff;
138         u_int8_t protonum;
139         int ret;
140
141         rcu_read_lock();
142
143         l3proto = __nf_ct_l3proto_find(l3num);
144         ret = l3proto->get_l4proto(skb, nhoff, &protoff, &protonum);
145         if (ret != NF_ACCEPT) {
146                 rcu_read_unlock();
147                 return false;
148         }
149
150         l4proto = __nf_ct_l4proto_find(l3num, protonum);
151
152         ret = nf_ct_get_tuple(skb, nhoff, protoff, l3num, protonum, tuple,
153                               l3proto, l4proto);
154
155         rcu_read_unlock();
156         return ret;
157 }
158 EXPORT_SYMBOL_GPL(nf_ct_get_tuplepr);
159
160 bool
161 nf_ct_invert_tuple(struct nf_conntrack_tuple *inverse,
162                    const struct nf_conntrack_tuple *orig,
163                    const struct nf_conntrack_l3proto *l3proto,
164                    const struct nf_conntrack_l4proto *l4proto)
165 {
166         memset(inverse, 0, sizeof(*inverse));
167
168         inverse->src.l3num = orig->src.l3num;
169         if (l3proto->invert_tuple(inverse, orig) == 0)
170                 return false;
171
172         inverse->dst.dir = !orig->dst.dir;
173
174         inverse->dst.protonum = orig->dst.protonum;
175         return l4proto->invert_tuple(inverse, orig);
176 }
177 EXPORT_SYMBOL_GPL(nf_ct_invert_tuple);
178
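/* Unlink both directions from the hash table and drop any expectations
 * still registered for this conntrack; called with nf_conntrack_lock held.
 */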
179 static void
180 clean_from_lists(struct nf_conn *ct)
181 {
182         pr_debug("clean_from_lists(%p)\n", ct);
183         hlist_nulls_del_rcu(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode);
184         hlist_nulls_del_rcu(&ct->tuplehash[IP_CT_DIR_REPLY].hnnode);
185
186         /* Destroy all pending expectations */
187         nf_ct_remove_expectations(ct);
188 }
189
190 static void
191 destroy_conntrack(struct nf_conntrack *nfct)
192 {
193         struct nf_conn *ct = (struct nf_conn *)nfct;
194         struct net *net = nf_ct_net(ct);
195         struct nf_conntrack_l4proto *l4proto;
196
197         pr_debug("destroy_conntrack(%p)\n", ct);
198         NF_CT_ASSERT(atomic_read(&nfct->use) == 0);
199         NF_CT_ASSERT(!timer_pending(&ct->timeout));
200
201         /* To make sure we don't get any weird locking issues here:
202          * destroy_conntrack() MUST NOT be called with a write lock
203          * to nf_conntrack_lock!!! -HW */
204         rcu_read_lock();
205         l4proto = __nf_ct_l4proto_find(nf_ct_l3num(ct), nf_ct_protonum(ct));
206         if (l4proto && l4proto->destroy)
207                 l4proto->destroy(ct);
208
209         rcu_read_unlock();
210
211         spin_lock_bh(&nf_conntrack_lock);
212         /* Expectations will have been removed in clean_from_lists,
213          * except that TFTP can create an expectation on the first packet,
214          * before the connection is in the list, so we need to clean up
215          * here, too. */
216         nf_ct_remove_expectations(ct);
217
218         /* We overload first tuple to link into unconfirmed list. */
219         if (!nf_ct_is_confirmed(ct)) {
220                 BUG_ON(hlist_nulls_unhashed(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode));
221                 hlist_nulls_del_rcu(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode);
222         }
223
224         NF_CT_STAT_INC(net, delete);
225         spin_unlock_bh(&nf_conntrack_lock);
226
227         if (ct->master)
228                 nf_ct_put(ct->master);
229
230         pr_debug("destroy_conntrack: returning ct=%p to slab\n", ct);
231         nf_conntrack_free(ct);
232 }
233
234 void nf_ct_delete_from_lists(struct nf_conn *ct)
235 {
236         struct net *net = nf_ct_net(ct);
237
238         nf_ct_helper_destroy(ct);
239         spin_lock_bh(&nf_conntrack_lock);
240         /* Inside lock so preempt is disabled on module removal path.
241          * Otherwise we can get spurious warnings. */
242         NF_CT_STAT_INC(net, delete_list);
243         clean_from_lists(ct);
244         spin_unlock_bh(&nf_conntrack_lock);
245 }
246 EXPORT_SYMBOL_GPL(nf_ct_delete_from_lists);
247
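/* Timer callback for reliable event delivery: retry the IPCT_DESTROY
 * event with a randomized delay until it is delivered, then unlink the
 * conntrack from the dying list and drop the reference held for it.
 */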
248 static void death_by_event(unsigned long ul_conntrack)
249 {
250         struct nf_conn *ct = (void *)ul_conntrack;
251         struct net *net = nf_ct_net(ct);
252         struct nf_conntrack_ecache *ecache = nf_ct_ecache_find(ct);
253
254         BUG_ON(ecache == NULL);
255
256         if (nf_conntrack_event(IPCT_DESTROY, ct) < 0) {
257                 /* bad luck, let's try again later */
258                 ecache->timeout.expires = jiffies +
259                         (random32() % net->ct.sysctl_events_retry_timeout);
260                 add_timer(&ecache->timeout);
261                 return;
262         }
263         /* we've got the event delivered, now it's dying */
264         set_bit(IPS_DYING_BIT, &ct->status);
265         spin_lock(&nf_conntrack_lock);
266         hlist_nulls_del(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode);
267         spin_unlock(&nf_conntrack_lock);
268         nf_ct_put(ct);
269 }
270
271 void nf_ct_insert_dying_list(struct nf_conn *ct)
272 {
273         struct net *net = nf_ct_net(ct);
274         struct nf_conntrack_ecache *ecache = nf_ct_ecache_find(ct);
275
276         BUG_ON(ecache == NULL);
277
278         /* add this conntrack to the dying list */
279         spin_lock_bh(&nf_conntrack_lock);
280         hlist_nulls_add_head(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode,
281                              &net->ct.dying);
282         spin_unlock_bh(&nf_conntrack_lock);
283         /* set a new timer to retry event delivery */
284         setup_timer(&ecache->timeout, death_by_event, (unsigned long)ct);
285         ecache->timeout.expires = jiffies +
286                 (random32() % net->ct.sysctl_events_retry_timeout);
287         add_timer(&ecache->timeout);
288 }
289 EXPORT_SYMBOL_GPL(nf_ct_insert_dying_list);
290
291 static void death_by_timeout(unsigned long ul_conntrack)
292 {
293         struct nf_conn *ct = (void *)ul_conntrack;
294         struct nf_conn_tstamp *tstamp;
295
296         tstamp = nf_conn_tstamp_find(ct);
297         if (tstamp && tstamp->stop == 0)
298                 tstamp->stop = ktime_to_ns(ktime_get_real());
299
300         if (!test_bit(IPS_DYING_BIT, &ct->status) &&
301             unlikely(nf_conntrack_event(IPCT_DESTROY, ct) < 0)) {
302                 /* destroy event was not delivered */
303                 nf_ct_delete_from_lists(ct);
304                 nf_ct_insert_dying_list(ct);
305                 return;
306         }
307         set_bit(IPS_DYING_BIT, &ct->status);
308         nf_ct_delete_from_lists(ct);
309         nf_ct_put(ct);
310 }
311
312 /*
313  * Warning :
314  * - Caller must take a reference on returned object
315  *   and recheck nf_ct_tuple_equal(tuple, &h->tuple)
316  * OR
317  * - Caller must lock nf_conntrack_lock before calling this function
318  */
319 static struct nf_conntrack_tuple_hash *
320 ____nf_conntrack_find(struct net *net, u16 zone,
321                       const struct nf_conntrack_tuple *tuple, u32 hash)
322 {
323         struct nf_conntrack_tuple_hash *h;
324         struct hlist_nulls_node *n;
325         unsigned int bucket = hash_bucket(hash, net);
326
327         /* Disable BHs the entire time since we normally need to disable them
328          * at least once for the stats anyway.
329          */
330         local_bh_disable();
331 begin:
332         hlist_nulls_for_each_entry_rcu(h, n, &net->ct.hash[bucket], hnnode) {
333                 if (nf_ct_tuple_equal(tuple, &h->tuple) &&
334                     nf_ct_zone(nf_ct_tuplehash_to_ctrack(h)) == zone) {
335                         NF_CT_STAT_INC(net, found);
336                         local_bh_enable();
337                         return h;
338                 }
339                 NF_CT_STAT_INC(net, searched);
340         }
341         /*
342          * If the nulls value we got at the end of this lookup is
343          * not the expected one, we must restart the lookup, since
344          * we probably hit an entry that was moved to another chain.
345          */
346         if (get_nulls_value(n) != bucket) {
347                 NF_CT_STAT_INC(net, search_restart);
348                 goto begin;
349         }
350         local_bh_enable();
351
352         return NULL;
353 }
354
355 struct nf_conntrack_tuple_hash *
356 __nf_conntrack_find(struct net *net, u16 zone,
357                     const struct nf_conntrack_tuple *tuple)
358 {
359         return ____nf_conntrack_find(net, zone, tuple,
360                                      hash_conntrack_raw(tuple, zone));
361 }
362 EXPORT_SYMBOL_GPL(__nf_conntrack_find);
363
364 /* Find a connection corresponding to a tuple. */
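/* A reference is taken on the result; the tuple and zone are re-checked
 * after the refcount bump because entries can be recycled at any time
 * under SLAB_DESTROY_BY_RCU.
 */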
365 static struct nf_conntrack_tuple_hash *
366 __nf_conntrack_find_get(struct net *net, u16 zone,
367                         const struct nf_conntrack_tuple *tuple, u32 hash)
368 {
369         struct nf_conntrack_tuple_hash *h;
370         struct nf_conn *ct;
371
372         rcu_read_lock();
373 begin:
374         h = ____nf_conntrack_find(net, zone, tuple, hash);
375         if (h) {
376                 ct = nf_ct_tuplehash_to_ctrack(h);
377                 if (unlikely(nf_ct_is_dying(ct) ||
378                              !atomic_inc_not_zero(&ct->ct_general.use)))
379                         h = NULL;
380                 else {
381                         if (unlikely(!nf_ct_tuple_equal(tuple, &h->tuple) ||
382                                      nf_ct_zone(ct) != zone)) {
383                                 nf_ct_put(ct);
384                                 goto begin;
385                         }
386                 }
387         }
388         rcu_read_unlock();
389
390         return h;
391 }
392
393 struct nf_conntrack_tuple_hash *
394 nf_conntrack_find_get(struct net *net, u16 zone,
395                       const struct nf_conntrack_tuple *tuple)
396 {
397         return __nf_conntrack_find_get(net, zone, tuple,
398                                        hash_conntrack_raw(tuple, zone));
399 }
400 EXPORT_SYMBOL_GPL(nf_conntrack_find_get);
401
402 static void __nf_conntrack_hash_insert(struct nf_conn *ct,
403                                        unsigned int hash,
404                                        unsigned int repl_hash)
405 {
406         struct net *net = nf_ct_net(ct);
407
408         hlist_nulls_add_head_rcu(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode,
409                            &net->ct.hash[hash]);
410         hlist_nulls_add_head_rcu(&ct->tuplehash[IP_CT_DIR_REPLY].hnnode,
411                            &net->ct.hash[repl_hash]);
412 }
413
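/* Check that neither direction of @ct clashes with an existing entry in
 * the same zone and, if not, start the timeout timer, take a reference
 * and insert both tuples into the hash table.  Returns -EEXIST on a clash.
 */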
414 int
415 nf_conntrack_hash_check_insert(struct nf_conn *ct)
416 {
417         struct net *net = nf_ct_net(ct);
418         unsigned int hash, repl_hash;
419         struct nf_conntrack_tuple_hash *h;
420         struct hlist_nulls_node *n;
421         u16 zone;
422
423         zone = nf_ct_zone(ct);
424         hash = hash_conntrack(net, zone,
425                               &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
426         repl_hash = hash_conntrack(net, zone,
427                                    &ct->tuplehash[IP_CT_DIR_REPLY].tuple);
428
429         spin_lock_bh(&nf_conntrack_lock);
430
431         /* See if there's one in the list already, including reverse */
432         hlist_nulls_for_each_entry(h, n, &net->ct.hash[hash], hnnode)
433                 if (nf_ct_tuple_equal(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple,
434                                       &h->tuple) &&
435                     zone == nf_ct_zone(nf_ct_tuplehash_to_ctrack(h)))
436                         goto out;
437         hlist_nulls_for_each_entry(h, n, &net->ct.hash[repl_hash], hnnode)
438                 if (nf_ct_tuple_equal(&ct->tuplehash[IP_CT_DIR_REPLY].tuple,
439                                       &h->tuple) &&
440                     zone == nf_ct_zone(nf_ct_tuplehash_to_ctrack(h)))
441                         goto out;
442
443         add_timer(&ct->timeout);
444         nf_conntrack_get(&ct->ct_general);
445         __nf_conntrack_hash_insert(ct, hash, repl_hash);
446         NF_CT_STAT_INC(net, insert);
447         spin_unlock_bh(&nf_conntrack_lock);
448
449         return 0;
450
451 out:
452         NF_CT_STAT_INC(net, insert_failed);
453         spin_unlock_bh(&nf_conntrack_lock);
454         return -EEXIST;
455 }
456 EXPORT_SYMBOL_GPL(nf_conntrack_hash_check_insert);
457
458 /* Confirm a connection given skb; places it in hash table */
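/* Only packets in the ORIGINAL direction confirm: the conntrack is moved
 * off the unconfirmed list into the hash table, its timeout is started
 * relative to now and IPS_CONFIRMED is set.
 */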
459 int
460 __nf_conntrack_confirm(struct sk_buff *skb)
461 {
462         unsigned int hash, repl_hash;
463         struct nf_conntrack_tuple_hash *h;
464         struct nf_conn *ct;
465         struct nf_conn_help *help;
466         struct nf_conn_tstamp *tstamp;
467         struct hlist_nulls_node *n;
468         enum ip_conntrack_info ctinfo;
469         struct net *net;
470         u16 zone;
471
472         ct = nf_ct_get(skb, &ctinfo);
473         net = nf_ct_net(ct);
474
475         /* ipt_REJECT uses nf_conntrack_attach to attach related
476            ICMP/TCP RST packets in the other direction.  The actual packet
477            which created the connection will be IP_CT_NEW or, for an
478            expected connection, IP_CT_RELATED. */
479         if (CTINFO2DIR(ctinfo) != IP_CT_DIR_ORIGINAL)
480                 return NF_ACCEPT;
481
482         zone = nf_ct_zone(ct);
483         /* reuse the hash saved before */
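        /* (the raw hash was stashed in the otherwise unused REPLY-direction
         *  pprev pointer by __nf_conntrack_alloc) */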
484         hash = *(unsigned long *)&ct->tuplehash[IP_CT_DIR_REPLY].hnnode.pprev;
485         hash = hash_bucket(hash, net);
486         repl_hash = hash_conntrack(net, zone,
487                                    &ct->tuplehash[IP_CT_DIR_REPLY].tuple);
488
489         /* We're not in hash table, and we refuse to set up related
490            connections for unconfirmed conns.  But packet copies and
491            REJECT will give spurious warnings here. */
492         /* NF_CT_ASSERT(atomic_read(&ct->ct_general.use) == 1); */
493
494         /* No external references means no one else could have
495            confirmed us. */
496         NF_CT_ASSERT(!nf_ct_is_confirmed(ct));
497         pr_debug("Confirming conntrack %p\n", ct);
498
499         spin_lock_bh(&nf_conntrack_lock);
500
501         /* We have to check the DYING flag inside the lock to prevent
502            a race against nf_ct_get_next_corpse() possibly called from
503            user context, else we would insert an already 'dead' entry into
504            the hash, blocking further use of that particular connection -JM */
505
506         if (unlikely(nf_ct_is_dying(ct))) {
507                 spin_unlock_bh(&nf_conntrack_lock);
508                 return NF_ACCEPT;
509         }
510
511         /* See if there's one in the list already, including reverse:
512            NAT could have grabbed it without realizing, since we're
513            not in the hash.  If there is, we lost the race. */
514         hlist_nulls_for_each_entry(h, n, &net->ct.hash[hash], hnnode)
515                 if (nf_ct_tuple_equal(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple,
516                                       &h->tuple) &&
517                     zone == nf_ct_zone(nf_ct_tuplehash_to_ctrack(h)))
518                         goto out;
519         hlist_nulls_for_each_entry(h, n, &net->ct.hash[repl_hash], hnnode)
520                 if (nf_ct_tuple_equal(&ct->tuplehash[IP_CT_DIR_REPLY].tuple,
521                                       &h->tuple) &&
522                     zone == nf_ct_zone(nf_ct_tuplehash_to_ctrack(h)))
523                         goto out;
524
525         /* Remove from unconfirmed list */
526         hlist_nulls_del_rcu(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode);
527
528         /* Timer relative to confirmation time, not original
529            setting time, otherwise we'd get timer wrap in
530            weird delay cases. */
531         ct->timeout.expires += jiffies;
532         add_timer(&ct->timeout);
533         atomic_inc(&ct->ct_general.use);
534         ct->status |= IPS_CONFIRMED;
535
536         /* set conntrack timestamp, if enabled. */
537         tstamp = nf_conn_tstamp_find(ct);
538         if (tstamp) {
539                 if (skb->tstamp.tv64 == 0)
540                         __net_timestamp(skb);
541
542                 tstamp->start = ktime_to_ns(skb->tstamp);
543         }
544         /* Since the lookup is lockless, hash insertion must be done after
545          * starting the timer and setting the CONFIRMED bit. The RCU barriers
546          * guarantee that no other CPU can find the conntrack before the above
547          * stores are visible.
548          */
549         __nf_conntrack_hash_insert(ct, hash, repl_hash);
550         NF_CT_STAT_INC(net, insert);
551         spin_unlock_bh(&nf_conntrack_lock);
552
553         help = nfct_help(ct);
554         if (help && help->helper)
555                 nf_conntrack_event_cache(IPCT_HELPER, ct);
556
557         nf_conntrack_event_cache(master_ct(ct) ?
558                                  IPCT_RELATED : IPCT_NEW, ct);
559         return NF_ACCEPT;
560
561 out:
562         NF_CT_STAT_INC(net, insert_failed);
563         spin_unlock_bh(&nf_conntrack_lock);
564         return NF_DROP;
565 }
566 EXPORT_SYMBOL_GPL(__nf_conntrack_confirm);
567
568 /* Returns true if a connection corresponds to the tuple (required
569    for NAT). */
570 int
571 nf_conntrack_tuple_taken(const struct nf_conntrack_tuple *tuple,
572                          const struct nf_conn *ignored_conntrack)
573 {
574         struct net *net = nf_ct_net(ignored_conntrack);
575         struct nf_conntrack_tuple_hash *h;
576         struct hlist_nulls_node *n;
577         struct nf_conn *ct;
578         u16 zone = nf_ct_zone(ignored_conntrack);
579         unsigned int hash = hash_conntrack(net, zone, tuple);
580
581         /* Disable BHs the entire time since we need to disable them at
582          * least once for the stats anyway.
583          */
584         rcu_read_lock_bh();
585         hlist_nulls_for_each_entry_rcu(h, n, &net->ct.hash[hash], hnnode) {
586                 ct = nf_ct_tuplehash_to_ctrack(h);
587                 if (ct != ignored_conntrack &&
588                     nf_ct_tuple_equal(tuple, &h->tuple) &&
589                     nf_ct_zone(ct) == zone) {
590                         NF_CT_STAT_INC(net, found);
591                         rcu_read_unlock_bh();
592                         return 1;
593                 }
594                 NF_CT_STAT_INC(net, searched);
595         }
596         rcu_read_unlock_bh();
597
598         return 0;
599 }
600 EXPORT_SYMBOL_GPL(nf_conntrack_tuple_taken);
601
602 #define NF_CT_EVICTION_RANGE    8
603
604 /* There's a small race here where we may free a just-assured
605    connection.  Too bad: we're in trouble anyway. */
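/* Scan up to NF_CT_EVICTION_RANGE entries starting at bucket @hash, pick
 * a conntrack that is not yet ASSURED and kill it through its timeout
 * handler to make room for a new entry.
 */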
606 static noinline int early_drop(struct net *net, unsigned int hash)
607 {
608         /* Use oldest entry, which is roughly LRU */
609         struct nf_conntrack_tuple_hash *h;
610         struct nf_conn *ct = NULL, *tmp;
611         struct hlist_nulls_node *n;
612         unsigned int i, cnt = 0;
613         int dropped = 0;
614
615         rcu_read_lock();
616         for (i = 0; i < net->ct.htable_size; i++) {
617                 hlist_nulls_for_each_entry_rcu(h, n, &net->ct.hash[hash],
618                                          hnnode) {
619                         tmp = nf_ct_tuplehash_to_ctrack(h);
620                         if (!test_bit(IPS_ASSURED_BIT, &tmp->status))
621                                 ct = tmp;
622                         cnt++;
623                 }
624
625                 if (ct != NULL) {
626                         if (likely(!nf_ct_is_dying(ct) &&
627                                    atomic_inc_not_zero(&ct->ct_general.use)))
628                                 break;
629                         else
630                                 ct = NULL;
631                 }
632
633                 if (cnt >= NF_CT_EVICTION_RANGE)
634                         break;
635
636                 hash = (hash + 1) % net->ct.htable_size;
637         }
638         rcu_read_unlock();
639
640         if (!ct)
641                 return dropped;
642
643         if (del_timer(&ct->timeout)) {
644                 death_by_timeout((unsigned long)ct);
645                 /* Check if we indeed killed this entry. Reliable event
646                    delivery may have inserted it into the dying list. */
647                 if (test_bit(IPS_DYING_BIT, &ct->status)) {
648                         dropped = 1;
649                         NF_CT_STAT_INC_ATOMIC(net, early_drop);
650                 }
651         }
652         nf_ct_put(ct);
653         return dropped;
654 }
655
656 void init_nf_conntrack_hash_rnd(void)
657 {
658         unsigned int rand;
659
660         /*
661          * Why not initialize nf_conntrack_hash_rnd in an init() function?
662          * Because there isn't enough entropy while the system is booting,
663          * so we defer the initialization until as late as possible.
664          */
665         do {
666                 get_random_bytes(&rand, sizeof(rand));
667         } while (!rand);
668         cmpxchg(&nf_conntrack_hash_rnd, 0, rand);
669 }
670
671 static struct nf_conn *
672 __nf_conntrack_alloc(struct net *net, u16 zone,
673                      const struct nf_conntrack_tuple *orig,
674                      const struct nf_conntrack_tuple *repl,
675                      gfp_t gfp, u32 hash)
676 {
677         struct nf_conn *ct;
678
679         if (unlikely(!nf_conntrack_hash_rnd)) {
680                 init_nf_conntrack_hash_rnd();
681                 /* recompute the hash now that nf_conntrack_hash_rnd is initialized */
682                 hash = hash_conntrack_raw(orig, zone);
683         }
684
685         /* We don't want any race condition at early drop stage */
686         atomic_inc(&net->ct.count);
687
688         if (nf_conntrack_max &&
689             unlikely(atomic_read(&net->ct.count) > nf_conntrack_max)) {
690                 if (!early_drop(net, hash_bucket(hash, net))) {
691                         atomic_dec(&net->ct.count);
692                         net_warn_ratelimited("nf_conntrack: table full, dropping packet\n");
693                         return ERR_PTR(-ENOMEM);
694                 }
695         }
696
697         /*
698          * Do not use kmem_cache_zalloc(), as this cache uses
699          * SLAB_DESTROY_BY_RCU.
700          */
701         ct = kmem_cache_alloc(net->ct.nf_conntrack_cachep, gfp);
702         if (ct == NULL) {
703                 atomic_dec(&net->ct.count);
704                 return ERR_PTR(-ENOMEM);
705         }
706         /*
707          * Leave ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode.next
708          * and ct->tuplehash[IP_CT_DIR_REPLY].hnnode.next unchanged.
709          */
710         memset(&ct->tuplehash[IP_CT_DIR_MAX], 0,
711                offsetof(struct nf_conn, proto) -
712                offsetof(struct nf_conn, tuplehash[IP_CT_DIR_MAX]));
713         spin_lock_init(&ct->lock);
714         ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple = *orig;
715         ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode.pprev = NULL;
716         ct->tuplehash[IP_CT_DIR_REPLY].tuple = *repl;
717         /* save hash for reusing when confirming */
718         *(unsigned long *)(&ct->tuplehash[IP_CT_DIR_REPLY].hnnode.pprev) = hash;
719         /* Don't set timer yet: wait for confirmation */
720         setup_timer(&ct->timeout, death_by_timeout, (unsigned long)ct);
721         write_pnet(&ct->ct_net, net);
722 #ifdef CONFIG_NF_CONNTRACK_ZONES
723         if (zone) {
724                 struct nf_conntrack_zone *nf_ct_zone;
725
726                 nf_ct_zone = nf_ct_ext_add(ct, NF_CT_EXT_ZONE, GFP_ATOMIC);
727                 if (!nf_ct_zone)
728                         goto out_free;
729                 nf_ct_zone->id = zone;
730         }
731 #endif
732         /*
733          * changes to lookup keys must be done before setting refcnt to 1
734          */
735         smp_wmb();
736         atomic_set(&ct->ct_general.use, 1);
737         return ct;
738
739 #ifdef CONFIG_NF_CONNTRACK_ZONES
740 out_free:
741         atomic_dec(&net->ct.count);
742         kmem_cache_free(net->ct.nf_conntrack_cachep, ct);
743         return ERR_PTR(-ENOMEM);
744 #endif
745 }
746
747 struct nf_conn *nf_conntrack_alloc(struct net *net, u16 zone,
748                                    const struct nf_conntrack_tuple *orig,
749                                    const struct nf_conntrack_tuple *repl,
750                                    gfp_t gfp)
751 {
752         return __nf_conntrack_alloc(net, zone, orig, repl, gfp, 0);
753 }
754 EXPORT_SYMBOL_GPL(nf_conntrack_alloc);
755
756 void nf_conntrack_free(struct nf_conn *ct)
757 {
758         struct net *net = nf_ct_net(ct);
759
760         nf_ct_ext_destroy(ct);
761         atomic_dec(&net->ct.count);
762         nf_ct_ext_free(ct);
763         kmem_cache_free(net->ct.nf_conntrack_cachep, ct);
764 }
765 EXPORT_SYMBOL_GPL(nf_conntrack_free);
766
767 /* Allocate a new conntrack: we return -ENOMEM if classification
768    failed due to stress.  Otherwise it really is unclassifiable. */
769 static struct nf_conntrack_tuple_hash *
770 init_conntrack(struct net *net, struct nf_conn *tmpl,
771                const struct nf_conntrack_tuple *tuple,
772                struct nf_conntrack_l3proto *l3proto,
773                struct nf_conntrack_l4proto *l4proto,
774                struct sk_buff *skb,
775                unsigned int dataoff, u32 hash)
776 {
777         struct nf_conn *ct;
778         struct nf_conn_help *help;
779         struct nf_conntrack_tuple repl_tuple;
780         struct nf_conntrack_ecache *ecache;
781         struct nf_conntrack_expect *exp;
782         u16 zone = tmpl ? nf_ct_zone(tmpl) : NF_CT_DEFAULT_ZONE;
783         struct nf_conn_timeout *timeout_ext;
784         unsigned int *timeouts;
785
786         if (!nf_ct_invert_tuple(&repl_tuple, tuple, l3proto, l4proto)) {
787                 pr_debug("Can't invert tuple.\n");
788                 return NULL;
789         }
790
791         ct = __nf_conntrack_alloc(net, zone, tuple, &repl_tuple, GFP_ATOMIC,
792                                   hash);
793         if (IS_ERR(ct))
794                 return (struct nf_conntrack_tuple_hash *)ct;
795
796         timeout_ext = tmpl ? nf_ct_timeout_find(tmpl) : NULL;
797         if (timeout_ext)
798                 timeouts = NF_CT_TIMEOUT_EXT_DATA(timeout_ext);
799         else
800                 timeouts = l4proto->get_timeouts(net);
801
802         if (!l4proto->new(ct, skb, dataoff, timeouts)) {
803                 nf_conntrack_free(ct);
804                 pr_debug("init conntrack: can't track with proto module\n");
805                 return NULL;
806         }
807
808         if (timeout_ext)
809                 nf_ct_timeout_ext_add(ct, timeout_ext->timeout, GFP_ATOMIC);
810
811         nf_ct_acct_ext_add(ct, GFP_ATOMIC);
812         nf_ct_tstamp_ext_add(ct, GFP_ATOMIC);
813
814         ecache = tmpl ? nf_ct_ecache_find(tmpl) : NULL;
815         nf_ct_ecache_ext_add(ct, ecache ? ecache->ctmask : 0,
816                                  ecache ? ecache->expmask : 0,
817                              GFP_ATOMIC);
818
819         spin_lock_bh(&nf_conntrack_lock);
820         exp = nf_ct_find_expectation(net, zone, tuple);
821         if (exp) {
822                 pr_debug("conntrack: expectation arrives ct=%p exp=%p\n",
823                          ct, exp);
824                 /* Welcome, Mr. Bond.  We've been expecting you... */
825                 __set_bit(IPS_EXPECTED_BIT, &ct->status);
826                 ct->master = exp->master;
827                 if (exp->helper) {
828                         help = nf_ct_helper_ext_add(ct, exp->helper,
829                                                     GFP_ATOMIC);
830                         if (help)
831                                 rcu_assign_pointer(help->helper, exp->helper);
832                 }
833
834 #ifdef CONFIG_NF_CONNTRACK_MARK
835                 ct->mark = exp->master->mark;
836 #endif
837 #ifdef CONFIG_NF_CONNTRACK_SECMARK
838                 ct->secmark = exp->master->secmark;
839 #endif
840                 nf_conntrack_get(&ct->master->ct_general);
841                 NF_CT_STAT_INC(net, expect_new);
842         } else {
843                 __nf_ct_try_assign_helper(ct, tmpl, GFP_ATOMIC);
844                 NF_CT_STAT_INC(net, new);
845         }
846
847         /* Overload tuple linked list to put us in unconfirmed list. */
848         hlist_nulls_add_head_rcu(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode,
849                        &net->ct.unconfirmed);
850
851         spin_unlock_bh(&nf_conntrack_lock);
852
853         if (exp) {
854                 if (exp->expectfn)
855                         exp->expectfn(ct, exp);
856                 nf_ct_expect_put(exp);
857         }
858
859         return &ct->tuplehash[IP_CT_DIR_ORIGINAL];
860 }
861
862 /* On success, returns conntrack ptr, sets skb->nfct and ctinfo */
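/* ctinfo is derived from the packet direction and the connection state:
 * replies map to IP_CT_ESTABLISHED_REPLY, originals to IP_CT_ESTABLISHED,
 * IP_CT_RELATED or IP_CT_NEW depending on the status bits already set.
 */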
863 static inline struct nf_conn *
864 resolve_normal_ct(struct net *net, struct nf_conn *tmpl,
865                   struct sk_buff *skb,
866                   unsigned int dataoff,
867                   u_int16_t l3num,
868                   u_int8_t protonum,
869                   struct nf_conntrack_l3proto *l3proto,
870                   struct nf_conntrack_l4proto *l4proto,
871                   int *set_reply,
872                   enum ip_conntrack_info *ctinfo)
873 {
874         struct nf_conntrack_tuple tuple;
875         struct nf_conntrack_tuple_hash *h;
876         struct nf_conn *ct;
877         u16 zone = tmpl ? nf_ct_zone(tmpl) : NF_CT_DEFAULT_ZONE;
878         u32 hash;
879
880         if (!nf_ct_get_tuple(skb, skb_network_offset(skb),
881                              dataoff, l3num, protonum, &tuple, l3proto,
882                              l4proto)) {
883                 pr_debug("resolve_normal_ct: Can't get tuple\n");
884                 return NULL;
885         }
886
887         /* look for tuple match */
888         hash = hash_conntrack_raw(&tuple, zone);
889         h = __nf_conntrack_find_get(net, zone, &tuple, hash);
890         if (!h) {
891                 h = init_conntrack(net, tmpl, &tuple, l3proto, l4proto,
892                                    skb, dataoff, hash);
893                 if (!h)
894                         return NULL;
895                 if (IS_ERR(h))
896                         return (void *)h;
897         }
898         ct = nf_ct_tuplehash_to_ctrack(h);
899
900         /* It exists; we have a (non-exclusive) reference. */
901         if (NF_CT_DIRECTION(h) == IP_CT_DIR_REPLY) {
902                 *ctinfo = IP_CT_ESTABLISHED_REPLY;
903                 /* Please set the reply bit if this packet is OK */
904                 *set_reply = 1;
905         } else {
906                 /* Once we've had two way comms, always ESTABLISHED. */
907                 if (test_bit(IPS_SEEN_REPLY_BIT, &ct->status)) {
908                         pr_debug("nf_conntrack_in: normal packet for %p\n", ct);
909                         *ctinfo = IP_CT_ESTABLISHED;
910                 } else if (test_bit(IPS_EXPECTED_BIT, &ct->status)) {
911                         pr_debug("nf_conntrack_in: related packet for %p\n",
912                                  ct);
913                         *ctinfo = IP_CT_RELATED;
914                 } else {
915                         pr_debug("nf_conntrack_in: new packet for %p\n", ct);
916                         *ctinfo = IP_CT_NEW;
917                 }
918                 *set_reply = 0;
919         }
920         skb->nfct = &ct->ct_general;
921         skb->nfctinfo = *ctinfo;
922         return ct;
923 }
924
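/* Main conntrack hook: look up (or create) the conntrack for this packet,
 * let the L4 protocol tracker update its state and timeouts, and cache
 * any generated events for later delivery.
 */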
925 unsigned int
926 nf_conntrack_in(struct net *net, u_int8_t pf, unsigned int hooknum,
927                 struct sk_buff *skb)
928 {
929         struct nf_conn *ct, *tmpl = NULL;
930         enum ip_conntrack_info ctinfo;
931         struct nf_conntrack_l3proto *l3proto;
932         struct nf_conntrack_l4proto *l4proto;
933         struct nf_conn_timeout *timeout_ext;
934         unsigned int *timeouts;
935         unsigned int dataoff;
936         u_int8_t protonum;
937         int set_reply = 0;
938         int ret;
939
940         if (skb->nfct) {
941                 /* Previously seen (loopback or untracked)?  Ignore. */
942                 tmpl = (struct nf_conn *)skb->nfct;
943                 if (!nf_ct_is_template(tmpl)) {
944                         NF_CT_STAT_INC_ATOMIC(net, ignore);
945                         return NF_ACCEPT;
946                 }
947                 skb->nfct = NULL;
948         }
949
950         /* rcu_read_lock()ed by nf_hook_slow */
951         l3proto = __nf_ct_l3proto_find(pf);
952         ret = l3proto->get_l4proto(skb, skb_network_offset(skb),
953                                    &dataoff, &protonum);
954         if (ret <= 0) {
955                 pr_debug("not prepared to track yet or error occurred\n");
956                 NF_CT_STAT_INC_ATOMIC(net, error);
957                 NF_CT_STAT_INC_ATOMIC(net, invalid);
958                 ret = -ret;
959                 goto out;
960         }
961
962         l4proto = __nf_ct_l4proto_find(pf, protonum);
963
964         /* It may be a special packet: error, unclean...
965          * The inverse of the return code tells the netfilter
966          * core what to do with the packet. */
967         if (l4proto->error != NULL) {
968                 ret = l4proto->error(net, tmpl, skb, dataoff, &ctinfo,
969                                      pf, hooknum);
970                 if (ret <= 0) {
971                         NF_CT_STAT_INC_ATOMIC(net, error);
972                         NF_CT_STAT_INC_ATOMIC(net, invalid);
973                         ret = -ret;
974                         goto out;
975                 }
976                 /* ICMP[v6] protocol trackers may assign one conntrack. */
977                 if (skb->nfct)
978                         goto out;
979         }
980
981         ct = resolve_normal_ct(net, tmpl, skb, dataoff, pf, protonum,
982                                l3proto, l4proto, &set_reply, &ctinfo);
983         if (!ct) {
984                 /* Not valid part of a connection */
985                 NF_CT_STAT_INC_ATOMIC(net, invalid);
986                 ret = NF_ACCEPT;
987                 goto out;
988         }
989
990         if (IS_ERR(ct)) {
991                 /* Too stressed to deal. */
992                 NF_CT_STAT_INC_ATOMIC(net, drop);
993                 ret = NF_DROP;
994                 goto out;
995         }
996
997         NF_CT_ASSERT(skb->nfct);
998
999         /* Decide what timeout policy we want to apply to this flow. */
1000         timeout_ext = nf_ct_timeout_find(ct);
1001         if (timeout_ext)
1002                 timeouts = NF_CT_TIMEOUT_EXT_DATA(timeout_ext);
1003         else
1004                 timeouts = l4proto->get_timeouts(net);
1005
1006         ret = l4proto->packet(ct, skb, dataoff, ctinfo, pf, hooknum, timeouts);
1007         if (ret <= 0) {
1008                 /* Invalid: inverse of the return code tells
1009                  * the netfilter core what to do */
1010                 pr_debug("nf_conntrack_in: Can't track with proto module\n");
1011                 nf_conntrack_put(skb->nfct);
1012                 skb->nfct = NULL;
1013                 NF_CT_STAT_INC_ATOMIC(net, invalid);
1014                 if (ret == -NF_DROP)
1015                         NF_CT_STAT_INC_ATOMIC(net, drop);
1016                 ret = -ret;
1017                 goto out;
1018         }
1019
1020         if (set_reply && !test_and_set_bit(IPS_SEEN_REPLY_BIT, &ct->status))
1021                 nf_conntrack_event_cache(IPCT_REPLY, ct);
1022 out:
1023         if (tmpl) {
1024                 /* Special case: we have to repeat this hook, assign the
1025                  * template again to this packet. We assume that this packet
1026                  * has no conntrack assigned. This is used by nf_ct_tcp. */
1027                 if (ret == NF_REPEAT)
1028                         skb->nfct = (struct nf_conntrack *)tmpl;
1029                 else
1030                         nf_ct_put(tmpl);
1031         }
1032
1033         return ret;
1034 }
1035 EXPORT_SYMBOL_GPL(nf_conntrack_in);
1036
1037 bool nf_ct_invert_tuplepr(struct nf_conntrack_tuple *inverse,
1038                           const struct nf_conntrack_tuple *orig)
1039 {
1040         bool ret;
1041
1042         rcu_read_lock();
1043         ret = nf_ct_invert_tuple(inverse, orig,
1044                                  __nf_ct_l3proto_find(orig->src.l3num),
1045                                  __nf_ct_l4proto_find(orig->src.l3num,
1046                                                       orig->dst.protonum));
1047         rcu_read_unlock();
1048         return ret;
1049 }
1050 EXPORT_SYMBOL_GPL(nf_ct_invert_tuplepr);
1051
1052 /* Alter reply tuple (maybe alter helper).  This is for NAT, and is
1053    implicitly racy: see __nf_conntrack_confirm */
1054 void nf_conntrack_alter_reply(struct nf_conn *ct,
1055                               const struct nf_conntrack_tuple *newreply)
1056 {
1057         struct nf_conn_help *help = nfct_help(ct);
1058
1059         /* Should be unconfirmed, so not in hash table yet */
1060         NF_CT_ASSERT(!nf_ct_is_confirmed(ct));
1061
1062         pr_debug("Altering reply tuple of %p to ", ct);
1063         nf_ct_dump_tuple(newreply);
1064
1065         ct->tuplehash[IP_CT_DIR_REPLY].tuple = *newreply;
1066         if (ct->master || (help && !hlist_empty(&help->expectations)))
1067                 return;
1068
1069         rcu_read_lock();
1070         __nf_ct_try_assign_helper(ct, NULL, GFP_ATOMIC);
1071         rcu_read_unlock();
1072 }
1073 EXPORT_SYMBOL_GPL(nf_conntrack_alter_reply);
1074
1075 /* Refresh conntrack for this many jiffies and do accounting if do_acct is 1 */
1076 void __nf_ct_refresh_acct(struct nf_conn *ct,
1077                           enum ip_conntrack_info ctinfo,
1078                           const struct sk_buff *skb,
1079                           unsigned long extra_jiffies,
1080                           int do_acct)
1081 {
1082         NF_CT_ASSERT(ct->timeout.data == (unsigned long)ct);
1083         NF_CT_ASSERT(skb);
1084
1085         /* Only update if this is not a fixed timeout */
1086         if (test_bit(IPS_FIXED_TIMEOUT_BIT, &ct->status))
1087                 goto acct;
1088
1089         /* If not in hash table, timer will not be active yet */
1090         if (!nf_ct_is_confirmed(ct)) {
1091                 ct->timeout.expires = extra_jiffies;
1092         } else {
1093                 unsigned long newtime = jiffies + extra_jiffies;
1094
1095                 /* Only update the timeout if the new timeout is at least
1096                    HZ jiffies from the old timeout. Need del_timer for race
1097                    avoidance (may already be dying). */
1098                 if (newtime - ct->timeout.expires >= HZ)
1099                         mod_timer_pending(&ct->timeout, newtime);
1100         }
1101
1102 acct:
1103         if (do_acct) {
1104                 struct nf_conn_counter *acct;
1105
1106                 acct = nf_conn_acct_find(ct);
1107                 if (acct) {
1108                         atomic64_inc(&acct[CTINFO2DIR(ctinfo)].packets);
1109                         atomic64_add(skb->len, &acct[CTINFO2DIR(ctinfo)].bytes);
1110                 }
1111         }
1112 }
1113 EXPORT_SYMBOL_GPL(__nf_ct_refresh_acct);
1114
1115 bool __nf_ct_kill_acct(struct nf_conn *ct,
1116                        enum ip_conntrack_info ctinfo,
1117                        const struct sk_buff *skb,
1118                        int do_acct)
1119 {
1120         if (do_acct) {
1121                 struct nf_conn_counter *acct;
1122
1123                 acct = nf_conn_acct_find(ct);
1124                 if (acct) {
1125                         atomic64_inc(&acct[CTINFO2DIR(ctinfo)].packets);
1126                         atomic64_add(skb->len - skb_network_offset(skb),
1127                                      &acct[CTINFO2DIR(ctinfo)].bytes);
1128                 }
1129         }
1130
1131         if (del_timer(&ct->timeout)) {
1132                 ct->timeout.function((unsigned long)ct);
1133                 return true;
1134         }
1135         return false;
1136 }
1137 EXPORT_SYMBOL_GPL(__nf_ct_kill_acct);
1138
1139 #ifdef CONFIG_NF_CONNTRACK_ZONES
1140 static struct nf_ct_ext_type nf_ct_zone_extend __read_mostly = {
1141         .len    = sizeof(struct nf_conntrack_zone),
1142         .align  = __alignof__(struct nf_conntrack_zone),
1143         .id     = NF_CT_EXT_ZONE,
1144 };
1145 #endif
1146
1147 #if IS_ENABLED(CONFIG_NF_CT_NETLINK)
1148
1149 #include <linux/netfilter/nfnetlink.h>
1150 #include <linux/netfilter/nfnetlink_conntrack.h>
1151 #include <linux/mutex.h>
1152
1153 /* Generic function for tcp/udp/sctp/dccp and the like. This needs to be
1154  * in nf_conntrack_core, since we don't want the protocols to autoload
1155  * or depend on ctnetlink */
1156 int nf_ct_port_tuple_to_nlattr(struct sk_buff *skb,
1157                                const struct nf_conntrack_tuple *tuple)
1158 {
1159         if (nla_put_be16(skb, CTA_PROTO_SRC_PORT, tuple->src.u.tcp.port) ||
1160             nla_put_be16(skb, CTA_PROTO_DST_PORT, tuple->dst.u.tcp.port))
1161                 goto nla_put_failure;
1162         return 0;
1163
1164 nla_put_failure:
1165         return -1;
1166 }
1167 EXPORT_SYMBOL_GPL(nf_ct_port_tuple_to_nlattr);
1168
1169 const struct nla_policy nf_ct_port_nla_policy[CTA_PROTO_MAX+1] = {
1170         [CTA_PROTO_SRC_PORT]  = { .type = NLA_U16 },
1171         [CTA_PROTO_DST_PORT]  = { .type = NLA_U16 },
1172 };
1173 EXPORT_SYMBOL_GPL(nf_ct_port_nla_policy);
1174
1175 int nf_ct_port_nlattr_to_tuple(struct nlattr *tb[],
1176                                struct nf_conntrack_tuple *t)
1177 {
1178         if (!tb[CTA_PROTO_SRC_PORT] || !tb[CTA_PROTO_DST_PORT])
1179                 return -EINVAL;
1180
1181         t->src.u.tcp.port = nla_get_be16(tb[CTA_PROTO_SRC_PORT]);
1182         t->dst.u.tcp.port = nla_get_be16(tb[CTA_PROTO_DST_PORT]);
1183
1184         return 0;
1185 }
1186 EXPORT_SYMBOL_GPL(nf_ct_port_nlattr_to_tuple);
1187
1188 int nf_ct_port_nlattr_tuple_size(void)
1189 {
1190         return nla_policy_len(nf_ct_port_nla_policy, CTA_PROTO_MAX + 1);
1191 }
1192 EXPORT_SYMBOL_GPL(nf_ct_port_nlattr_tuple_size);
1193 #endif
1194
1195 /* Used by ipt_REJECT and ip6t_REJECT. */
1196 static void nf_conntrack_attach(struct sk_buff *nskb, struct sk_buff *skb)
1197 {
1198         struct nf_conn *ct;
1199         enum ip_conntrack_info ctinfo;
1200
1201         /* This ICMP is in reverse direction to the packet which caused it */
1202         ct = nf_ct_get(skb, &ctinfo);
1203         if (CTINFO2DIR(ctinfo) == IP_CT_DIR_ORIGINAL)
1204                 ctinfo = IP_CT_RELATED_REPLY;
1205         else
1206                 ctinfo = IP_CT_RELATED;
1207
1208         /* Attach to new skbuff, and increment count */
1209         nskb->nfct = &ct->ct_general;
1210         nskb->nfctinfo = ctinfo;
1211         nf_conntrack_get(nskb->nfct);
1212 }
1213
1214 /* Bring out ya dead! */
1215 static struct nf_conn *
1216 get_next_corpse(struct net *net, int (*iter)(struct nf_conn *i, void *data),
1217                 void *data, unsigned int *bucket)
1218 {
1219         struct nf_conntrack_tuple_hash *h;
1220         struct nf_conn *ct;
1221         struct hlist_nulls_node *n;
1222
1223         spin_lock_bh(&nf_conntrack_lock);
1224         for (; *bucket < net->ct.htable_size; (*bucket)++) {
1225                 hlist_nulls_for_each_entry(h, n, &net->ct.hash[*bucket], hnnode) {
1226                         ct = nf_ct_tuplehash_to_ctrack(h);
1227                         if (iter(ct, data))
1228                                 goto found;
1229                 }
1230         }
1231         hlist_nulls_for_each_entry(h, n, &net->ct.unconfirmed, hnnode) {
1232                 ct = nf_ct_tuplehash_to_ctrack(h);
1233                 if (iter(ct, data))
1234                         set_bit(IPS_DYING_BIT, &ct->status);
1235         }
1236         spin_unlock_bh(&nf_conntrack_lock);
1237         return NULL;
1238 found:
1239         atomic_inc(&ct->ct_general.use);
1240         spin_unlock_bh(&nf_conntrack_lock);
1241         return ct;
1242 }
1243
1244 void nf_ct_iterate_cleanup(struct net *net,
1245                            int (*iter)(struct nf_conn *i, void *data),
1246                            void *data)
1247 {
1248         struct nf_conn *ct;
1249         unsigned int bucket = 0;
1250
1251         while ((ct = get_next_corpse(net, iter, data, &bucket)) != NULL) {
1252                 /* Time to push up daisies... */
1253                 if (del_timer(&ct->timeout))
1254                         death_by_timeout((unsigned long)ct);
1255                 /* ... else the timer will get him soon. */
1256
1257                 nf_ct_put(ct);
1258         }
1259 }
1260 EXPORT_SYMBOL_GPL(nf_ct_iterate_cleanup);
1261
1262 struct __nf_ct_flush_report {
1263         u32 pid;
1264         int report;
1265 };
1266
1267 static int kill_report(struct nf_conn *i, void *data)
1268 {
1269         struct __nf_ct_flush_report *fr = (struct __nf_ct_flush_report *)data;
1270         struct nf_conn_tstamp *tstamp;
1271
1272         tstamp = nf_conn_tstamp_find(i);
1273         if (tstamp && tstamp->stop == 0)
1274                 tstamp->stop = ktime_to_ns(ktime_get_real());
1275
1276         /* If we fail to deliver the event, death_by_timeout() will retry */
1277         if (nf_conntrack_event_report(IPCT_DESTROY, i,
1278                                       fr->pid, fr->report) < 0)
1279                 return 1;
1280
1281         /* Avoid the delivery of the destroy event in death_by_timeout(). */
1282         set_bit(IPS_DYING_BIT, &i->status);
1283         return 1;
1284 }
1285
1286 static int kill_all(struct nf_conn *i, void *data)
1287 {
1288         return 1;
1289 }
1290
1291 void nf_ct_free_hashtable(void *hash, unsigned int size)
1292 {
1293         if (is_vmalloc_addr(hash))
1294                 vfree(hash);
1295         else
1296                 free_pages((unsigned long)hash,
1297                            get_order(sizeof(struct hlist_head) * size));
1298 }
1299 EXPORT_SYMBOL_GPL(nf_ct_free_hashtable);
1300
1301 void nf_conntrack_flush_report(struct net *net, u32 pid, int report)
1302 {
1303         struct __nf_ct_flush_report fr = {
1304                 .pid    = pid,
1305                 .report = report,
1306         };
1307         nf_ct_iterate_cleanup(net, kill_report, &fr);
1308 }
1309 EXPORT_SYMBOL_GPL(nf_conntrack_flush_report);
1310
1311 static void nf_ct_release_dying_list(struct net *net)
1312 {
1313         struct nf_conntrack_tuple_hash *h;
1314         struct nf_conn *ct;
1315         struct hlist_nulls_node *n;
1316
1317         spin_lock_bh(&nf_conntrack_lock);
1318         hlist_nulls_for_each_entry(h, n, &net->ct.dying, hnnode) {
1319                 ct = nf_ct_tuplehash_to_ctrack(h);
1320                 /* never fails to remove them, no listeners at this point */
1321                 nf_ct_kill(ct);
1322         }
1323         spin_unlock_bh(&nf_conntrack_lock);
1324 }
1325
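/* Count references to the per-CPU untracked conntracks beyond their
 * initial self-reference; cleanup spins until this drops to zero.
 */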
1326 static int untrack_refs(void)
1327 {
1328         int cnt = 0, cpu;
1329
1330         for_each_possible_cpu(cpu) {
1331                 struct nf_conn *ct = &per_cpu(nf_conntrack_untracked, cpu);
1332
1333                 cnt += atomic_read(&ct->ct_general.use) - 1;
1334         }
1335         return cnt;
1336 }
1337
1338 static void nf_conntrack_cleanup_init_net(void)
1339 {
1340         while (untrack_refs() > 0)
1341                 schedule();
1342
1343 #ifdef CONFIG_NF_CONNTRACK_ZONES
1344         nf_ct_extend_unregister(&nf_ct_zone_extend);
1345 #endif
1346 }
1347
1348 static void nf_conntrack_cleanup_net(struct net *net)
1349 {
1350  i_see_dead_people:
1351         nf_ct_iterate_cleanup(net, kill_all, NULL);
1352         nf_ct_release_dying_list(net);
1353         if (atomic_read(&net->ct.count) != 0) {
1354                 schedule();
1355                 goto i_see_dead_people;
1356         }
1357
1358         nf_ct_free_hashtable(net->ct.hash, net->ct.htable_size);
1359         nf_conntrack_helper_fini(net);
1360         nf_conntrack_timeout_fini(net);
1361         nf_conntrack_ecache_fini(net);
1362         nf_conntrack_tstamp_fini(net);
1363         nf_conntrack_acct_fini(net);
1364         nf_conntrack_expect_fini(net);
1365         kmem_cache_destroy(net->ct.nf_conntrack_cachep);
1366         kfree(net->ct.slabname);
1367         free_percpu(net->ct.stat);
1368 }
1369
1370 /* Mishearing the voices in his head, our hero wonders how he's
1371    supposed to kill the mall. */
1372 void nf_conntrack_cleanup(struct net *net)
1373 {
1374         if (net_eq(net, &init_net))
1375                 RCU_INIT_POINTER(ip_ct_attach, NULL);
1376
1377         /* This makes sure all current packets have passed through
1378            netfilter framework.  Roll on, two-stage module
1379            delete... */
1380         synchronize_net();
1381         nf_conntrack_proto_fini(net);
1382         nf_conntrack_cleanup_net(net);
1383
1384         if (net_eq(net, &init_net)) {
1385                 RCU_INIT_POINTER(nf_ct_destroy, NULL);
1386                 nf_conntrack_cleanup_init_net();
1387         }
1388 }
1389
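/* Allocate the hash table, rounded up to a whole number of pages; fall
 * back to vmalloc if the page allocator fails, and seed each chain with
 * its own nulls marker when @nulls is set.
 */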
1390 void *nf_ct_alloc_hashtable(unsigned int *sizep, int nulls)
1391 {
1392         struct hlist_nulls_head *hash;
1393         unsigned int nr_slots, i;
1394         size_t sz;
1395
1396         BUILD_BUG_ON(sizeof(struct hlist_nulls_head) != sizeof(struct hlist_head));
1397         nr_slots = *sizep = roundup(*sizep, PAGE_SIZE / sizeof(struct hlist_nulls_head));
1398         sz = nr_slots * sizeof(struct hlist_nulls_head);
1399         hash = (void *)__get_free_pages(GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO,
1400                                         get_order(sz));
1401         if (!hash) {
1402                 printk(KERN_WARNING "nf_conntrack: falling back to vmalloc.\n");
1403                 hash = vzalloc(sz);
1404         }
1405
1406         if (hash && nulls)
1407                 for (i = 0; i < nr_slots; i++)
1408                         INIT_HLIST_NULLS_HEAD(&hash[i], i);
1409
1410         return hash;
1411 }
1412 EXPORT_SYMBOL_GPL(nf_ct_alloc_hashtable);
1413
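/* Handler for the "hashsize" module parameter: allocate a new table,
 * rehash every entry into it under nf_conntrack_lock and free the old
 * table once the switch-over is complete.
 */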
1414 int nf_conntrack_set_hashsize(const char *val, struct kernel_param *kp)
1415 {
1416         int i, bucket;
1417         unsigned int hashsize, old_size;
1418         struct hlist_nulls_head *hash, *old_hash;
1419         struct nf_conntrack_tuple_hash *h;
1420         struct nf_conn *ct;
1421
1422         if (current->nsproxy->net_ns != &init_net)
1423                 return -EOPNOTSUPP;
1424
1425         /* On boot, we can set this without any fancy locking. */
1426         if (!nf_conntrack_htable_size)
1427                 return param_set_uint(val, kp);
1428
1429         hashsize = simple_strtoul(val, NULL, 0);
1430         if (!hashsize)
1431                 return -EINVAL;
1432
1433         hash = nf_ct_alloc_hashtable(&hashsize, 1);
1434         if (!hash)
1435                 return -ENOMEM;
1436
1437         /* Lookups in the old hash might happen in parallel, which means we
1438          * might get false negatives during connection lookup. New connections
1439          * created because of a false negative won't make it into the hash
1440          * though, since that requires taking the lock.
1441          */
1442         spin_lock_bh(&nf_conntrack_lock);
1443         for (i = 0; i < init_net.ct.htable_size; i++) {
1444                 while (!hlist_nulls_empty(&init_net.ct.hash[i])) {
1445                         h = hlist_nulls_entry(init_net.ct.hash[i].first,
1446                                         struct nf_conntrack_tuple_hash, hnnode);
1447                         ct = nf_ct_tuplehash_to_ctrack(h);
1448                         hlist_nulls_del_rcu(&h->hnnode);
1449                         bucket = __hash_conntrack(&h->tuple, nf_ct_zone(ct),
1450                                                   hashsize);
1451                         hlist_nulls_add_head_rcu(&h->hnnode, &hash[bucket]);
1452                 }
1453         }
1454         old_size = init_net.ct.htable_size;
1455         old_hash = init_net.ct.hash;
1456
1457         init_net.ct.htable_size = nf_conntrack_htable_size = hashsize;
1458         init_net.ct.hash = hash;
1459         spin_unlock_bh(&nf_conntrack_lock);
1460
1461         nf_ct_free_hashtable(old_hash, old_size);
1462         return 0;
1463 }
1464 EXPORT_SYMBOL_GPL(nf_conntrack_set_hashsize);
1465
1466 module_param_call(hashsize, nf_conntrack_set_hashsize, param_get_uint,
1467                   &nf_conntrack_htable_size, 0600);
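
/*
 * With the 0600 permission above, the hash size can be given when the module
 * is loaded or rewritten at runtime by root, e.g. (assuming the usual module
 * name "nf_conntrack"):
 *
 *   # modprobe nf_conntrack hashsize=131072
 *   # echo 131072 > /sys/module/nf_conntrack/parameters/hashsize
 */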
1468
1469 void nf_ct_untracked_status_or(unsigned long bits)
1470 {
1471         int cpu;
1472
1473         for_each_possible_cpu(cpu)
1474                 per_cpu(nf_conntrack_untracked, cpu).status |= bits;
1475 }
1476 EXPORT_SYMBOL_GPL(nf_ct_untracked_status_or);
1477
1478 static int nf_conntrack_init_init_net(void)
1479 {
1480         int max_factor = 8;
1481         int ret, cpu;
1482
1483         /* Idea from tcp.c: use 1/16384 of memory.  On i386 a 32MB
1484          * machine has 512 buckets; machines with >= 1GB have 16384 buckets. */
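        /* For example, with 4 KiB pages and a 4-byte struct hlist_head (i386):
         *   32 MB -> (32 << 20) / 16384 / 4 == 512 buckets
         *    1 GB -> (1 << 30)  / 16384 / 4 == 16384 buckets
         * Anything above 1 GB is clamped to 16384, and the floor is 32. */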
1485         if (!nf_conntrack_htable_size) {
1486                 nf_conntrack_htable_size
1487                         = (((totalram_pages << PAGE_SHIFT) / 16384)
1488                            / sizeof(struct hlist_head));
1489                 if (totalram_pages > (1024 * 1024 * 1024 / PAGE_SIZE))
1490                         nf_conntrack_htable_size = 16384;
1491                 if (nf_conntrack_htable_size < 32)
1492                         nf_conntrack_htable_size = 32;
1493
1494                 /* Use a max factor of four by default to get the same maximum
1495                  * as with the old struct list_heads. When a table size is given
1496                  * we use the old value of 8 to avoid reducing the maximum
1497                  * number of entries. */
1498                 max_factor = 4;
1499         }
1500         nf_conntrack_max = max_factor * nf_conntrack_htable_size;
1501
1502         printk(KERN_INFO "nf_conntrack version %s (%u buckets, %d max)\n",
1503                NF_CONNTRACK_VERSION, nf_conntrack_htable_size,
1504                nf_conntrack_max);
1505 #ifdef CONFIG_NF_CONNTRACK_ZONES
1506         ret = nf_ct_extend_register(&nf_ct_zone_extend);
1507         if (ret < 0)
1508                 goto err_extend;
1509 #endif
1510         /* Set up the fake (untracked) conntrack: never deleted, not in any hash */
1511         for_each_possible_cpu(cpu) {
1512                 struct nf_conn *ct = &per_cpu(nf_conntrack_untracked, cpu);
1513                 write_pnet(&ct->ct_net, &init_net);
1514                 atomic_set(&ct->ct_general.use, 1);
1515         }
1516         /*  - and make it look like a confirmed connection */
1517         nf_ct_untracked_status_or(IPS_CONFIRMED | IPS_UNTRACKED);
1518         return 0;
1519
1520 #ifdef CONFIG_NF_CONNTRACK_ZONES
1521 err_extend:
1522 #endif
1523         return ret;
1524 }
1525
1526 /*
1527  * We need to use special "null" values that are not used in the hash table
1528  */
1529 #define UNCONFIRMED_NULLS_VAL   ((1<<30)+0)
1530 #define DYING_NULLS_VAL         ((1<<30)+1)
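
/*
 * A "nulls" value is the end marker of an hlist_nulls list.  The per-bucket
 * chains use the bucket index as their marker (see
 * INIT_HLIST_NULLS_HEAD(&hash[i], i) in nf_ct_alloc_hashtable()), so a
 * lockless reader can tell which chain it finished on.  Values at or above
 * (1 << 30) are far beyond any realistic bucket count, which keeps the
 * unconfirmed and dying lists distinguishable from hash buckets.
 */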
1531
1532 static int nf_conntrack_init_net(struct net *net)
1533 {
1534         int ret;
1535
1536         atomic_set(&net->ct.count, 0);
1537         INIT_HLIST_NULLS_HEAD(&net->ct.unconfirmed, UNCONFIRMED_NULLS_VAL);
1538         INIT_HLIST_NULLS_HEAD(&net->ct.dying, DYING_NULLS_VAL);
1539         net->ct.stat = alloc_percpu(struct ip_conntrack_stat);
1540         if (!net->ct.stat) {
1541                 ret = -ENOMEM;
1542                 goto err_stat;
1543         }
1544
1545         net->ct.slabname = kasprintf(GFP_KERNEL, "nf_conntrack_%p", net);
1546         if (!net->ct.slabname) {
1547                 ret = -ENOMEM;
1548                 goto err_slabname;
1549         }
1550
1551         net->ct.nf_conntrack_cachep = kmem_cache_create(net->ct.slabname,
1552                                                         sizeof(struct nf_conn), 0,
1553                                                         SLAB_DESTROY_BY_RCU, NULL);
1554         if (!net->ct.nf_conntrack_cachep) {
1555                 printk(KERN_ERR "Unable to create nf_conn slab cache\n");
1556                 ret = -ENOMEM;
1557                 goto err_cache;
1558         }
1559
1560         net->ct.htable_size = nf_conntrack_htable_size;
1561         net->ct.hash = nf_ct_alloc_hashtable(&net->ct.htable_size, 1);
1562         if (!net->ct.hash) {
1563                 ret = -ENOMEM;
1564                 printk(KERN_ERR "Unable to create nf_conntrack_hash\n");
1565                 goto err_hash;
1566         }
1567         ret = nf_conntrack_expect_init(net);
1568         if (ret < 0)
1569                 goto err_expect;
1570         ret = nf_conntrack_acct_init(net);
1571         if (ret < 0)
1572                 goto err_acct;
1573         ret = nf_conntrack_tstamp_init(net);
1574         if (ret < 0)
1575                 goto err_tstamp;
1576         ret = nf_conntrack_ecache_init(net);
1577         if (ret < 0)
1578                 goto err_ecache;
1579         ret = nf_conntrack_timeout_init(net);
1580         if (ret < 0)
1581                 goto err_timeout;
1582         ret = nf_conntrack_helper_init(net);
1583         if (ret < 0)
1584                 goto err_helper;
1585         return 0;
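
        /* Error unwinding: undo the successful steps in reverse order. */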
1586 err_helper:
1587         nf_conntrack_timeout_fini(net);
1588 err_timeout:
1589         nf_conntrack_ecache_fini(net);
1590 err_ecache:
1591         nf_conntrack_tstamp_fini(net);
1592 err_tstamp:
1593         nf_conntrack_acct_fini(net);
1594 err_acct:
1595         nf_conntrack_expect_fini(net);
1596 err_expect:
1597         nf_ct_free_hashtable(net->ct.hash, net->ct.htable_size);
1598 err_hash:
1599         kmem_cache_destroy(net->ct.nf_conntrack_cachep);
1600 err_cache:
1601         kfree(net->ct.slabname);
1602 err_slabname:
1603         free_percpu(net->ct.stat);
1604 err_stat:
1605         return ret;
1606 }
1607
1608 s16 (*nf_ct_nat_offset)(const struct nf_conn *ct,
1609                         enum ip_conntrack_dir dir,
1610                         u32 seq);
1611 EXPORT_SYMBOL_GPL(nf_ct_nat_offset);
1612
1613 int nf_conntrack_init(struct net *net)
1614 {
1615         int ret;
1616
1617         if (net_eq(net, &init_net)) {
1618                 ret = nf_conntrack_init_init_net();
1619                 if (ret < 0)
1620                         goto out_init_net;
1621         }
1622         ret = nf_conntrack_proto_init(net);
1623         if (ret < 0)
1624                 goto out_proto;
1625         ret = nf_conntrack_init_net(net);
1626         if (ret < 0)
1627                 goto out_net;
1628
1629         if (net_eq(net, &init_net)) {
1630                 /* For use by REJECT target */
1631                 RCU_INIT_POINTER(ip_ct_attach, nf_conntrack_attach);
1632                 RCU_INIT_POINTER(nf_ct_destroy, destroy_conntrack);
1633
1634                 /* How to get NAT offsets */
1635                 RCU_INIT_POINTER(nf_ct_nat_offset, NULL);
1636         }
1637         return 0;
1638
1639 out_net:
1640         nf_conntrack_proto_fini(net);
1641 out_proto:
1642         if (net_eq(net, &init_net))
1643                 nf_conntrack_cleanup_init_net();
1644 out_init_net:
1645         return ret;
1646 }