/*
 * Packet matching code.
 *
 * Copyright (C) 1999 Paul `Rusty' Russell & Michael J. Neuling
 * Copyright (C) 2000-2005 Netfilter Core Team <coreteam@netfilter.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/capability.h>
#include <linux/in.h>
#include <linux/skbuff.h>
#include <linux/kmod.h>
#include <linux/vmalloc.h>
#include <linux/netdevice.h>
#include <linux/module.h>
#include <linux/poison.h>
#include <linux/icmpv6.h>
#include <net/ipv6.h>
#include <net/compat.h>
#include <asm/uaccess.h>
#include <linux/mutex.h>
#include <linux/proc_fs.h>
#include <linux/err.h>
#include <linux/cpumask.h>

#include <linux/netfilter_ipv6/ip6_tables.h>
#include <linux/netfilter/x_tables.h>
#include <net/netfilter/nf_log.h>
#include "../../netfilter/xt_repldata.h"
34 MODULE_LICENSE("GPL");
35 MODULE_AUTHOR("Netfilter Core Team <coreteam@netfilter.org>");
36 MODULE_DESCRIPTION("IPv6 packet filter");
/*#define DEBUG_IP_FIREWALL*/
/*#define DEBUG_ALLOW_ALL*/ /* Useful for remote debugging */
/*#define DEBUG_IP_FIREWALL_USER*/
/* Debug printout helpers: compile to nothing unless the corresponding
 * DEBUG_* symbol is defined, so the fast path carries no cost.
 */
#ifdef DEBUG_IP_FIREWALL
#define dprintf(format, args...) pr_info(format , ## args)
#else
#define dprintf(format, args...)
#endif

#ifdef DEBUG_IP_FIREWALL_USER
#define duprintf(format, args...) pr_info(format , ## args)
#else
#define duprintf(format, args...)
#endif

#ifdef CONFIG_NETFILTER_DEBUG
#define IP_NF_ASSERT(x)	WARN_ON(!(x))
#else
#define IP_NF_ASSERT(x)
#endif
61 /* All the better to debug you with... */
66 void *ip6t_alloc_initial_table(const struct xt_table *info)
68 return xt_alloc_initial_table(ip6t, IP6T);
70 EXPORT_SYMBOL_GPL(ip6t_alloc_initial_table);
/* We keep a set of rules for each CPU, so we can avoid write-locking
   them in the softirq when updating the counters and therefore
   only need to read-lock in the softirq; doing a write_lock_bh() in user
   context stops packets coming through and allows user context to read
   the counters or update the rules.

   Hence the start of any table is given by get_table() below. */
81 /* Returns whether matches rule or not. */
82 /* Performance critical - called for every packet */
84 ip6_packet_match(const struct sk_buff *skb,
87 const struct ip6t_ip6 *ip6info,
88 unsigned int *protoff,
89 int *fragoff, bool *hotdrop)
92 const struct ipv6hdr *ipv6 = ipv6_hdr(skb);
94 #define FWINV(bool, invflg) ((bool) ^ !!(ip6info->invflags & (invflg)))
96 if (FWINV(ipv6_masked_addr_cmp(&ipv6->saddr, &ip6info->smsk,
97 &ip6info->src), IP6T_INV_SRCIP) ||
98 FWINV(ipv6_masked_addr_cmp(&ipv6->daddr, &ip6info->dmsk,
99 &ip6info->dst), IP6T_INV_DSTIP)) {
100 dprintf("Source or dest mismatch.\n");
102 dprintf("SRC: %u. Mask: %u. Target: %u.%s\n", ip->saddr,
103 ipinfo->smsk.s_addr, ipinfo->src.s_addr,
104 ipinfo->invflags & IP6T_INV_SRCIP ? " (INV)" : "");
105 dprintf("DST: %u. Mask: %u. Target: %u.%s\n", ip->daddr,
106 ipinfo->dmsk.s_addr, ipinfo->dst.s_addr,
107 ipinfo->invflags & IP6T_INV_DSTIP ? " (INV)" : "");*/
111 ret = ifname_compare_aligned(indev, ip6info->iniface, ip6info->iniface_mask);
113 if (FWINV(ret != 0, IP6T_INV_VIA_IN)) {
114 dprintf("VIA in mismatch (%s vs %s).%s\n",
115 indev, ip6info->iniface,
116 ip6info->invflags&IP6T_INV_VIA_IN ?" (INV)":"");
120 ret = ifname_compare_aligned(outdev, ip6info->outiface, ip6info->outiface_mask);
122 if (FWINV(ret != 0, IP6T_INV_VIA_OUT)) {
123 dprintf("VIA out mismatch (%s vs %s).%s\n",
124 outdev, ip6info->outiface,
125 ip6info->invflags&IP6T_INV_VIA_OUT ?" (INV)":"");
129 /* ... might want to do something with class and flowlabel here ... */
131 /* look for the desired protocol header */
132 if((ip6info->flags & IP6T_F_PROTO)) {
134 unsigned short _frag_off;
136 protohdr = ipv6_find_hdr(skb, protoff, -1, &_frag_off, NULL);
142 *fragoff = _frag_off;
144 dprintf("Packet protocol %hi ?= %s%hi.\n",
146 ip6info->invflags & IP6T_INV_PROTO ? "!":"",
149 if (ip6info->proto == protohdr) {
150 if(ip6info->invflags & IP6T_INV_PROTO) {
156 /* We need match for the '-p all', too! */
157 if ((ip6info->proto != 0) &&
158 !(ip6info->invflags & IP6T_INV_PROTO))
164 /* should be ip6 safe */
166 ip6_checkentry(const struct ip6t_ip6 *ipv6)
168 if (ipv6->flags & ~IP6T_F_MASK) {
169 duprintf("Unknown flag bits set: %08X\n",
170 ipv6->flags & ~IP6T_F_MASK);
173 if (ipv6->invflags & ~IP6T_INV_MASK) {
174 duprintf("Unknown invflag bits set: %08X\n",
175 ipv6->invflags & ~IP6T_INV_MASK);
182 ip6t_error(struct sk_buff *skb, const struct xt_action_param *par)
184 net_info_ratelimited("error: `%s'\n", (const char *)par->targinfo);
/* Return the rule entry at byte @offset from the table @base. */
static inline struct ip6t_entry *
get_entry(const void *base, unsigned int offset)
{
	return (struct ip6t_entry *)(base + offset);
}
195 /* All zeroes == unconditional rule. */
196 /* Mildly perf critical (only if packet tracing is on) */
197 static inline bool unconditional(const struct ip6t_ip6 *ipv6)
199 static const struct ip6t_ip6 uncond;
201 return memcmp(ipv6, &uncond, sizeof(uncond)) == 0;
/* const-qualified accessor for a rule's target record. */
static inline const struct xt_entry_target *
ip6t_get_target_c(const struct ip6t_entry *e)
{
	return ip6t_get_target((struct ip6t_entry *)e);
}
210 #if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE)
211 /* This cries for unification! */
212 static const char *const hooknames[] = {
213 [NF_INET_PRE_ROUTING] = "PREROUTING",
214 [NF_INET_LOCAL_IN] = "INPUT",
215 [NF_INET_FORWARD] = "FORWARD",
216 [NF_INET_LOCAL_OUT] = "OUTPUT",
217 [NF_INET_POST_ROUTING] = "POSTROUTING",
220 enum nf_ip_trace_comments {
221 NF_IP6_TRACE_COMMENT_RULE,
222 NF_IP6_TRACE_COMMENT_RETURN,
223 NF_IP6_TRACE_COMMENT_POLICY,
226 static const char *const comments[] = {
227 [NF_IP6_TRACE_COMMENT_RULE] = "rule",
228 [NF_IP6_TRACE_COMMENT_RETURN] = "return",
229 [NF_IP6_TRACE_COMMENT_POLICY] = "policy",
232 static struct nf_loginfo trace_loginfo = {
233 .type = NF_LOG_TYPE_LOG,
237 .logflags = NF_LOG_MASK,
242 /* Mildly perf critical (only if packet tracing is on) */
244 get_chainname_rulenum(const struct ip6t_entry *s, const struct ip6t_entry *e,
245 const char *hookname, const char **chainname,
246 const char **comment, unsigned int *rulenum)
248 const struct xt_standard_target *t = (void *)ip6t_get_target_c(s);
250 if (strcmp(t->target.u.kernel.target->name, XT_ERROR_TARGET) == 0) {
251 /* Head of user chain: ERROR target with chainname */
252 *chainname = t->target.data;
257 if (s->target_offset == sizeof(struct ip6t_entry) &&
258 strcmp(t->target.u.kernel.target->name,
259 XT_STANDARD_TARGET) == 0 &&
261 unconditional(&s->ipv6)) {
262 /* Tail of chains: STANDARD target (return/policy) */
263 *comment = *chainname == hookname
264 ? comments[NF_IP6_TRACE_COMMENT_POLICY]
265 : comments[NF_IP6_TRACE_COMMENT_RETURN];
274 static void trace_packet(const struct sk_buff *skb,
276 const struct net_device *in,
277 const struct net_device *out,
278 const char *tablename,
279 const struct xt_table_info *private,
280 const struct ip6t_entry *e)
282 const void *table_base;
283 const struct ip6t_entry *root;
284 const char *hookname, *chainname, *comment;
285 const struct ip6t_entry *iter;
286 unsigned int rulenum = 0;
288 table_base = private->entries[smp_processor_id()];
289 root = get_entry(table_base, private->hook_entry[hook]);
291 hookname = chainname = hooknames[hook];
292 comment = comments[NF_IP6_TRACE_COMMENT_RULE];
294 xt_entry_foreach(iter, root, private->size - private->hook_entry[hook])
295 if (get_chainname_rulenum(iter, e, hookname,
296 &chainname, &comment, &rulenum) != 0)
299 nf_log_packet(AF_INET6, hook, skb, in, out, &trace_loginfo,
300 "TRACE: %s:%s:%s:%u ",
301 tablename, chainname, comment, rulenum);
305 static inline __pure struct ip6t_entry *
306 ip6t_next_entry(const struct ip6t_entry *entry)
308 return (void *)entry + entry->next_offset;
311 /* Returns one of the generic firewall policies, like NF_ACCEPT. */
313 ip6t_do_table(struct sk_buff *skb,
315 const struct net_device *in,
316 const struct net_device *out,
317 struct xt_table *table)
319 static const char nulldevname[IFNAMSIZ] __attribute__((aligned(sizeof(long))));
320 /* Initializing verdict to NF_DROP keeps gcc happy. */
321 unsigned int verdict = NF_DROP;
322 const char *indev, *outdev;
323 const void *table_base;
324 struct ip6t_entry *e, **jumpstack;
325 unsigned int *stackptr, origptr, cpu;
326 const struct xt_table_info *private;
327 struct xt_action_param acpar;
331 indev = in ? in->name : nulldevname;
332 outdev = out ? out->name : nulldevname;
333 /* We handle fragments by dealing with the first fragment as
334 * if it was a normal packet. All other fragments are treated
335 * normally, except that they will NEVER match rules that ask
336 * things we don't know, ie. tcp syn flag or ports). If the
337 * rule is also a fragment-specific rule, non-fragments won't
339 acpar.hotdrop = false;
342 acpar.family = NFPROTO_IPV6;
343 acpar.hooknum = hook;
345 IP_NF_ASSERT(table->valid_hooks & (1 << hook));
348 addend = xt_write_recseq_begin();
349 private = table->private;
350 cpu = smp_processor_id();
351 table_base = private->entries[cpu];
352 jumpstack = (struct ip6t_entry **)private->jumpstack[cpu];
353 stackptr = per_cpu_ptr(private->stackptr, cpu);
356 e = get_entry(table_base, private->hook_entry[hook]);
359 const struct xt_entry_target *t;
360 const struct xt_entry_match *ematch;
364 if (!ip6_packet_match(skb, indev, outdev, &e->ipv6,
365 &acpar.thoff, &acpar.fragoff, &acpar.hotdrop)) {
367 e = ip6t_next_entry(e);
371 xt_ematch_foreach(ematch, e) {
372 acpar.match = ematch->u.kernel.match;
373 acpar.matchinfo = ematch->data;
374 if (!acpar.match->match(skb, &acpar))
378 ADD_COUNTER(e->counters, skb->len, 1);
380 t = ip6t_get_target_c(e);
381 IP_NF_ASSERT(t->u.kernel.target);
383 #if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE)
384 /* The packet is traced: log it */
385 if (unlikely(skb->nf_trace))
386 trace_packet(skb, hook, in, out,
387 table->name, private, e);
389 /* Standard target? */
390 if (!t->u.kernel.target->target) {
393 v = ((struct xt_standard_target *)t)->verdict;
395 /* Pop from stack? */
396 if (v != XT_RETURN) {
397 verdict = (unsigned int)(-v) - 1;
400 if (*stackptr <= origptr)
401 e = get_entry(table_base,
402 private->underflow[hook]);
404 e = ip6t_next_entry(jumpstack[--*stackptr]);
407 if (table_base + v != ip6t_next_entry(e) &&
408 !(e->ipv6.flags & IP6T_F_GOTO)) {
409 if (*stackptr >= private->stacksize) {
413 jumpstack[(*stackptr)++] = e;
416 e = get_entry(table_base, v);
420 acpar.target = t->u.kernel.target;
421 acpar.targinfo = t->data;
423 verdict = t->u.kernel.target->target(skb, &acpar);
424 if (verdict == XT_CONTINUE)
425 e = ip6t_next_entry(e);
429 } while (!acpar.hotdrop);
433 xt_write_recseq_end(addend);
436 #ifdef DEBUG_ALLOW_ALL
445 /* Figures out from what hook each rule can be called: returns 0 if
446 there are loops. Puts hook bitmask in comefrom. */
448 mark_source_chains(const struct xt_table_info *newinfo,
449 unsigned int valid_hooks, void *entry0)
453 /* No recursion; use packet counter to save back ptrs (reset
454 to 0 as we leave), and comefrom to save source hook bitmask */
455 for (hook = 0; hook < NF_INET_NUMHOOKS; hook++) {
456 unsigned int pos = newinfo->hook_entry[hook];
457 struct ip6t_entry *e = (struct ip6t_entry *)(entry0 + pos);
459 if (!(valid_hooks & (1 << hook)))
462 /* Set initial back pointer. */
463 e->counters.pcnt = pos;
466 const struct xt_standard_target *t
467 = (void *)ip6t_get_target_c(e);
468 int visited = e->comefrom & (1 << hook);
470 if (e->comefrom & (1 << NF_INET_NUMHOOKS)) {
471 pr_err("iptables: loop hook %u pos %u %08X.\n",
472 hook, pos, e->comefrom);
475 e->comefrom |= ((1 << hook) | (1 << NF_INET_NUMHOOKS));
477 /* Unconditional return/END. */
478 if ((e->target_offset == sizeof(struct ip6t_entry) &&
479 (strcmp(t->target.u.user.name,
480 XT_STANDARD_TARGET) == 0) &&
482 unconditional(&e->ipv6)) || visited) {
483 unsigned int oldpos, size;
485 if ((strcmp(t->target.u.user.name,
486 XT_STANDARD_TARGET) == 0) &&
487 t->verdict < -NF_MAX_VERDICT - 1) {
488 duprintf("mark_source_chains: bad "
489 "negative verdict (%i)\n",
494 /* Return: backtrack through the last
497 e->comefrom ^= (1<<NF_INET_NUMHOOKS);
498 #ifdef DEBUG_IP_FIREWALL_USER
500 & (1 << NF_INET_NUMHOOKS)) {
501 duprintf("Back unset "
508 pos = e->counters.pcnt;
509 e->counters.pcnt = 0;
511 /* We're at the start. */
515 e = (struct ip6t_entry *)
517 } while (oldpos == pos + e->next_offset);
520 size = e->next_offset;
521 e = (struct ip6t_entry *)
522 (entry0 + pos + size);
523 e->counters.pcnt = pos;
526 int newpos = t->verdict;
528 if (strcmp(t->target.u.user.name,
529 XT_STANDARD_TARGET) == 0 &&
531 if (newpos > newinfo->size -
532 sizeof(struct ip6t_entry)) {
533 duprintf("mark_source_chains: "
534 "bad verdict (%i)\n",
538 /* This a jump; chase it. */
539 duprintf("Jump rule %u -> %u\n",
542 /* ... this is a fallthru */
543 newpos = pos + e->next_offset;
545 e = (struct ip6t_entry *)
547 e->counters.pcnt = pos;
552 duprintf("Finished chain %u\n", hook);
557 static void cleanup_match(struct xt_entry_match *m, struct net *net)
559 struct xt_mtdtor_param par;
562 par.match = m->u.kernel.match;
563 par.matchinfo = m->data;
564 par.family = NFPROTO_IPV6;
565 if (par.match->destroy != NULL)
566 par.match->destroy(&par);
567 module_put(par.match->me);
571 check_entry(const struct ip6t_entry *e, const char *name)
573 const struct xt_entry_target *t;
575 if (!ip6_checkentry(&e->ipv6)) {
576 duprintf("ip_tables: ip check failed %p %s.\n", e, name);
580 if (e->target_offset + sizeof(struct xt_entry_target) >
584 t = ip6t_get_target_c(e);
585 if (e->target_offset + t->u.target_size > e->next_offset)
591 static int check_match(struct xt_entry_match *m, struct xt_mtchk_param *par)
593 const struct ip6t_ip6 *ipv6 = par->entryinfo;
596 par->match = m->u.kernel.match;
597 par->matchinfo = m->data;
599 ret = xt_check_match(par, m->u.match_size - sizeof(*m),
600 ipv6->proto, ipv6->invflags & IP6T_INV_PROTO);
602 duprintf("ip_tables: check failed for `%s'.\n",
610 find_check_match(struct xt_entry_match *m, struct xt_mtchk_param *par)
612 struct xt_match *match;
615 match = xt_request_find_match(NFPROTO_IPV6, m->u.user.name,
618 duprintf("find_check_match: `%s' not found\n", m->u.user.name);
619 return PTR_ERR(match);
621 m->u.kernel.match = match;
623 ret = check_match(m, par);
629 module_put(m->u.kernel.match->me);
633 static int check_target(struct ip6t_entry *e, struct net *net, const char *name)
635 struct xt_entry_target *t = ip6t_get_target(e);
636 struct xt_tgchk_param par = {
640 .target = t->u.kernel.target,
642 .hook_mask = e->comefrom,
643 .family = NFPROTO_IPV6,
647 t = ip6t_get_target(e);
648 ret = xt_check_target(&par, t->u.target_size - sizeof(*t),
649 e->ipv6.proto, e->ipv6.invflags & IP6T_INV_PROTO);
651 duprintf("ip_tables: check failed for `%s'.\n",
652 t->u.kernel.target->name);
659 find_check_entry(struct ip6t_entry *e, struct net *net, const char *name,
662 struct xt_entry_target *t;
663 struct xt_target *target;
666 struct xt_mtchk_param mtpar;
667 struct xt_entry_match *ematch;
669 ret = check_entry(e, name);
676 mtpar.entryinfo = &e->ipv6;
677 mtpar.hook_mask = e->comefrom;
678 mtpar.family = NFPROTO_IPV6;
679 xt_ematch_foreach(ematch, e) {
680 ret = find_check_match(ematch, &mtpar);
682 goto cleanup_matches;
686 t = ip6t_get_target(e);
687 target = xt_request_find_target(NFPROTO_IPV6, t->u.user.name,
689 if (IS_ERR(target)) {
690 duprintf("find_check_entry: `%s' not found\n", t->u.user.name);
691 ret = PTR_ERR(target);
692 goto cleanup_matches;
694 t->u.kernel.target = target;
696 ret = check_target(e, net, name);
701 module_put(t->u.kernel.target->me);
703 xt_ematch_foreach(ematch, e) {
706 cleanup_match(ematch, net);
711 static bool check_underflow(const struct ip6t_entry *e)
713 const struct xt_entry_target *t;
714 unsigned int verdict;
716 if (!unconditional(&e->ipv6))
718 t = ip6t_get_target_c(e);
719 if (strcmp(t->u.user.name, XT_STANDARD_TARGET) != 0)
721 verdict = ((struct xt_standard_target *)t)->verdict;
722 verdict = -verdict - 1;
723 return verdict == NF_DROP || verdict == NF_ACCEPT;
727 check_entry_size_and_hooks(struct ip6t_entry *e,
728 struct xt_table_info *newinfo,
729 const unsigned char *base,
730 const unsigned char *limit,
731 const unsigned int *hook_entries,
732 const unsigned int *underflows,
733 unsigned int valid_hooks)
737 if ((unsigned long)e % __alignof__(struct ip6t_entry) != 0 ||
738 (unsigned char *)e + sizeof(struct ip6t_entry) >= limit) {
739 duprintf("Bad offset %p\n", e);
744 < sizeof(struct ip6t_entry) + sizeof(struct xt_entry_target)) {
745 duprintf("checking: element %p size %u\n",
750 /* Check hooks & underflows */
751 for (h = 0; h < NF_INET_NUMHOOKS; h++) {
752 if (!(valid_hooks & (1 << h)))
754 if ((unsigned char *)e - base == hook_entries[h])
755 newinfo->hook_entry[h] = hook_entries[h];
756 if ((unsigned char *)e - base == underflows[h]) {
757 if (!check_underflow(e)) {
758 pr_err("Underflows must be unconditional and "
759 "use the STANDARD target with "
763 newinfo->underflow[h] = underflows[h];
767 /* Clear counters and comefrom */
768 e->counters = ((struct xt_counters) { 0, 0 });
773 static void cleanup_entry(struct ip6t_entry *e, struct net *net)
775 struct xt_tgdtor_param par;
776 struct xt_entry_target *t;
777 struct xt_entry_match *ematch;
779 /* Cleanup all matches */
780 xt_ematch_foreach(ematch, e)
781 cleanup_match(ematch, net);
782 t = ip6t_get_target(e);
785 par.target = t->u.kernel.target;
786 par.targinfo = t->data;
787 par.family = NFPROTO_IPV6;
788 if (par.target->destroy != NULL)
789 par.target->destroy(&par);
790 module_put(par.target->me);
793 /* Checks and translates the user-supplied table segment (held in
796 translate_table(struct net *net, struct xt_table_info *newinfo, void *entry0,
797 const struct ip6t_replace *repl)
799 struct ip6t_entry *iter;
803 newinfo->size = repl->size;
804 newinfo->number = repl->num_entries;
806 /* Init all hooks to impossible value. */
807 for (i = 0; i < NF_INET_NUMHOOKS; i++) {
808 newinfo->hook_entry[i] = 0xFFFFFFFF;
809 newinfo->underflow[i] = 0xFFFFFFFF;
812 duprintf("translate_table: size %u\n", newinfo->size);
814 /* Walk through entries, checking offsets. */
815 xt_entry_foreach(iter, entry0, newinfo->size) {
816 ret = check_entry_size_and_hooks(iter, newinfo, entry0,
824 if (strcmp(ip6t_get_target(iter)->u.user.name,
825 XT_ERROR_TARGET) == 0)
826 ++newinfo->stacksize;
829 if (i != repl->num_entries) {
830 duprintf("translate_table: %u not %u entries\n",
831 i, repl->num_entries);
835 /* Check hooks all assigned */
836 for (i = 0; i < NF_INET_NUMHOOKS; i++) {
837 /* Only hooks which are valid */
838 if (!(repl->valid_hooks & (1 << i)))
840 if (newinfo->hook_entry[i] == 0xFFFFFFFF) {
841 duprintf("Invalid hook entry %u %u\n",
842 i, repl->hook_entry[i]);
845 if (newinfo->underflow[i] == 0xFFFFFFFF) {
846 duprintf("Invalid underflow %u %u\n",
847 i, repl->underflow[i]);
852 if (!mark_source_chains(newinfo, repl->valid_hooks, entry0))
855 /* Finally, each sanity check must pass */
857 xt_entry_foreach(iter, entry0, newinfo->size) {
858 ret = find_check_entry(iter, net, repl->name, repl->size);
865 xt_entry_foreach(iter, entry0, newinfo->size) {
868 cleanup_entry(iter, net);
873 /* And one copy for every other CPU */
874 for_each_possible_cpu(i) {
875 if (newinfo->entries[i] && newinfo->entries[i] != entry0)
876 memcpy(newinfo->entries[i], entry0, newinfo->size);
883 get_counters(const struct xt_table_info *t,
884 struct xt_counters counters[])
886 struct ip6t_entry *iter;
890 for_each_possible_cpu(cpu) {
891 seqcount_t *s = &per_cpu(xt_recseq, cpu);
894 xt_entry_foreach(iter, t->entries[cpu], t->size) {
899 start = read_seqcount_begin(s);
900 bcnt = iter->counters.bcnt;
901 pcnt = iter->counters.pcnt;
902 } while (read_seqcount_retry(s, start));
904 ADD_COUNTER(counters[i], bcnt, pcnt);
910 static struct xt_counters *alloc_counters(const struct xt_table *table)
912 unsigned int countersize;
913 struct xt_counters *counters;
914 const struct xt_table_info *private = table->private;
916 /* We need atomic snapshot of counters: rest doesn't change
917 (other than comefrom, which userspace doesn't care
919 countersize = sizeof(struct xt_counters) * private->number;
920 counters = vzalloc(countersize);
922 if (counters == NULL)
923 return ERR_PTR(-ENOMEM);
925 get_counters(private, counters);
931 copy_entries_to_user(unsigned int total_size,
932 const struct xt_table *table,
933 void __user *userptr)
935 unsigned int off, num;
936 const struct ip6t_entry *e;
937 struct xt_counters *counters;
938 const struct xt_table_info *private = table->private;
940 const void *loc_cpu_entry;
942 counters = alloc_counters(table);
943 if (IS_ERR(counters))
944 return PTR_ERR(counters);
946 /* choose the copy that is on our node/cpu, ...
947 * This choice is lazy (because current thread is
948 * allowed to migrate to another cpu)
950 loc_cpu_entry = private->entries[raw_smp_processor_id()];
951 if (copy_to_user(userptr, loc_cpu_entry, total_size) != 0) {
956 /* FIXME: use iterator macros --RR */
957 /* ... then go back and fix counters and names */
958 for (off = 0, num = 0; off < total_size; off += e->next_offset, num++){
960 const struct xt_entry_match *m;
961 const struct xt_entry_target *t;
963 e = (struct ip6t_entry *)(loc_cpu_entry + off);
964 if (copy_to_user(userptr + off
965 + offsetof(struct ip6t_entry, counters),
967 sizeof(counters[num])) != 0) {
972 for (i = sizeof(struct ip6t_entry);
973 i < e->target_offset;
974 i += m->u.match_size) {
977 if (copy_to_user(userptr + off + i
978 + offsetof(struct xt_entry_match,
980 m->u.kernel.match->name,
981 strlen(m->u.kernel.match->name)+1)
988 t = ip6t_get_target_c(e);
989 if (copy_to_user(userptr + off + e->target_offset
990 + offsetof(struct xt_entry_target,
992 t->u.kernel.target->name,
993 strlen(t->u.kernel.target->name)+1) != 0) {
1004 #ifdef CONFIG_COMPAT
1005 static void compat_standard_from_user(void *dst, const void *src)
1007 int v = *(compat_int_t *)src;
1010 v += xt_compat_calc_jump(AF_INET6, v);
1011 memcpy(dst, &v, sizeof(v));
1014 static int compat_standard_to_user(void __user *dst, const void *src)
1016 compat_int_t cv = *(int *)src;
1019 cv -= xt_compat_calc_jump(AF_INET6, cv);
1020 return copy_to_user(dst, &cv, sizeof(cv)) ? -EFAULT : 0;
1023 static int compat_calc_entry(const struct ip6t_entry *e,
1024 const struct xt_table_info *info,
1025 const void *base, struct xt_table_info *newinfo)
1027 const struct xt_entry_match *ematch;
1028 const struct xt_entry_target *t;
1029 unsigned int entry_offset;
1032 off = sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry);
1033 entry_offset = (void *)e - base;
1034 xt_ematch_foreach(ematch, e)
1035 off += xt_compat_match_offset(ematch->u.kernel.match);
1036 t = ip6t_get_target_c(e);
1037 off += xt_compat_target_offset(t->u.kernel.target);
1038 newinfo->size -= off;
1039 ret = xt_compat_add_offset(AF_INET6, entry_offset, off);
1043 for (i = 0; i < NF_INET_NUMHOOKS; i++) {
1044 if (info->hook_entry[i] &&
1045 (e < (struct ip6t_entry *)(base + info->hook_entry[i])))
1046 newinfo->hook_entry[i] -= off;
1047 if (info->underflow[i] &&
1048 (e < (struct ip6t_entry *)(base + info->underflow[i])))
1049 newinfo->underflow[i] -= off;
1054 static int compat_table_info(const struct xt_table_info *info,
1055 struct xt_table_info *newinfo)
1057 struct ip6t_entry *iter;
1058 void *loc_cpu_entry;
1061 if (!newinfo || !info)
1064 /* we dont care about newinfo->entries[] */
1065 memcpy(newinfo, info, offsetof(struct xt_table_info, entries));
1066 newinfo->initial_entries = 0;
1067 loc_cpu_entry = info->entries[raw_smp_processor_id()];
1068 xt_compat_init_offsets(AF_INET6, info->number);
1069 xt_entry_foreach(iter, loc_cpu_entry, info->size) {
1070 ret = compat_calc_entry(iter, info, loc_cpu_entry, newinfo);
1078 static int get_info(struct net *net, void __user *user,
1079 const int *len, int compat)
1081 char name[XT_TABLE_MAXNAMELEN];
1085 if (*len != sizeof(struct ip6t_getinfo)) {
1086 duprintf("length %u != %zu\n", *len,
1087 sizeof(struct ip6t_getinfo));
1091 if (copy_from_user(name, user, sizeof(name)) != 0)
1094 name[XT_TABLE_MAXNAMELEN-1] = '\0';
1095 #ifdef CONFIG_COMPAT
1097 xt_compat_lock(AF_INET6);
1099 t = try_then_request_module(xt_find_table_lock(net, AF_INET6, name),
1100 "ip6table_%s", name);
1101 if (!IS_ERR_OR_NULL(t)) {
1102 struct ip6t_getinfo info;
1103 const struct xt_table_info *private = t->private;
1104 #ifdef CONFIG_COMPAT
1105 struct xt_table_info tmp;
1108 ret = compat_table_info(private, &tmp);
1109 xt_compat_flush_offsets(AF_INET6);
1113 memset(&info, 0, sizeof(info));
1114 info.valid_hooks = t->valid_hooks;
1115 memcpy(info.hook_entry, private->hook_entry,
1116 sizeof(info.hook_entry));
1117 memcpy(info.underflow, private->underflow,
1118 sizeof(info.underflow));
1119 info.num_entries = private->number;
1120 info.size = private->size;
1121 strcpy(info.name, name);
1123 if (copy_to_user(user, &info, *len) != 0)
1131 ret = t ? PTR_ERR(t) : -ENOENT;
1132 #ifdef CONFIG_COMPAT
1134 xt_compat_unlock(AF_INET6);
1140 get_entries(struct net *net, struct ip6t_get_entries __user *uptr,
1144 struct ip6t_get_entries get;
1147 if (*len < sizeof(get)) {
1148 duprintf("get_entries: %u < %zu\n", *len, sizeof(get));
1151 if (copy_from_user(&get, uptr, sizeof(get)) != 0)
1153 if (*len != sizeof(struct ip6t_get_entries) + get.size) {
1154 duprintf("get_entries: %u != %zu\n",
1155 *len, sizeof(get) + get.size);
1159 t = xt_find_table_lock(net, AF_INET6, get.name);
1160 if (!IS_ERR_OR_NULL(t)) {
1161 struct xt_table_info *private = t->private;
1162 duprintf("t->private->number = %u\n", private->number);
1163 if (get.size == private->size)
1164 ret = copy_entries_to_user(private->size,
1165 t, uptr->entrytable);
1167 duprintf("get_entries: I've got %u not %u!\n",
1168 private->size, get.size);
1174 ret = t ? PTR_ERR(t) : -ENOENT;
1180 __do_replace(struct net *net, const char *name, unsigned int valid_hooks,
1181 struct xt_table_info *newinfo, unsigned int num_counters,
1182 void __user *counters_ptr)
1186 struct xt_table_info *oldinfo;
1187 struct xt_counters *counters;
1188 const void *loc_cpu_old_entry;
1189 struct ip6t_entry *iter;
1192 counters = vzalloc(num_counters * sizeof(struct xt_counters));
1198 t = try_then_request_module(xt_find_table_lock(net, AF_INET6, name),
1199 "ip6table_%s", name);
1200 if (IS_ERR_OR_NULL(t)) {
1201 ret = t ? PTR_ERR(t) : -ENOENT;
1202 goto free_newinfo_counters_untrans;
1206 if (valid_hooks != t->valid_hooks) {
1207 duprintf("Valid hook crap: %08X vs %08X\n",
1208 valid_hooks, t->valid_hooks);
1213 oldinfo = xt_replace_table(t, num_counters, newinfo, &ret);
1217 /* Update module usage count based on number of rules */
1218 duprintf("do_replace: oldnum=%u, initnum=%u, newnum=%u\n",
1219 oldinfo->number, oldinfo->initial_entries, newinfo->number);
1220 if ((oldinfo->number > oldinfo->initial_entries) ||
1221 (newinfo->number <= oldinfo->initial_entries))
1223 if ((oldinfo->number > oldinfo->initial_entries) &&
1224 (newinfo->number <= oldinfo->initial_entries))
1227 /* Get the old counters, and synchronize with replace */
1228 get_counters(oldinfo, counters);
1230 /* Decrease module usage counts and free resource */
1231 loc_cpu_old_entry = oldinfo->entries[raw_smp_processor_id()];
1232 xt_entry_foreach(iter, loc_cpu_old_entry, oldinfo->size)
1233 cleanup_entry(iter, net);
1235 xt_free_table_info(oldinfo);
1236 if (copy_to_user(counters_ptr, counters,
1237 sizeof(struct xt_counters) * num_counters) != 0)
1246 free_newinfo_counters_untrans:
1253 do_replace(struct net *net, const void __user *user, unsigned int len)
1256 struct ip6t_replace tmp;
1257 struct xt_table_info *newinfo;
1258 void *loc_cpu_entry;
1259 struct ip6t_entry *iter;
1261 if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
1264 /* overflow check */
1265 if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
1267 tmp.name[sizeof(tmp.name)-1] = 0;
1269 newinfo = xt_alloc_table_info(tmp.size);
1273 /* choose the copy that is on our node/cpu */
1274 loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
1275 if (copy_from_user(loc_cpu_entry, user + sizeof(tmp),
1281 ret = translate_table(net, newinfo, loc_cpu_entry, &tmp);
1285 duprintf("ip_tables: Translated table\n");
1287 ret = __do_replace(net, tmp.name, tmp.valid_hooks, newinfo,
1288 tmp.num_counters, tmp.counters);
1290 goto free_newinfo_untrans;
1293 free_newinfo_untrans:
1294 xt_entry_foreach(iter, loc_cpu_entry, newinfo->size)
1295 cleanup_entry(iter, net);
1297 xt_free_table_info(newinfo);
1302 do_add_counters(struct net *net, const void __user *user, unsigned int len,
1305 unsigned int i, curcpu;
1306 struct xt_counters_info tmp;
1307 struct xt_counters *paddc;
1308 unsigned int num_counters;
1313 const struct xt_table_info *private;
1315 const void *loc_cpu_entry;
1316 struct ip6t_entry *iter;
1317 unsigned int addend;
1318 #ifdef CONFIG_COMPAT
1319 struct compat_xt_counters_info compat_tmp;
1323 size = sizeof(struct compat_xt_counters_info);
1328 size = sizeof(struct xt_counters_info);
1331 if (copy_from_user(ptmp, user, size) != 0)
1334 #ifdef CONFIG_COMPAT
1336 num_counters = compat_tmp.num_counters;
1337 name = compat_tmp.name;
1341 num_counters = tmp.num_counters;
1345 if (len != size + num_counters * sizeof(struct xt_counters))
1348 paddc = vmalloc(len - size);
1352 if (copy_from_user(paddc, user + size, len - size) != 0) {
1357 t = xt_find_table_lock(net, AF_INET6, name);
1358 if (IS_ERR_OR_NULL(t)) {
1359 ret = t ? PTR_ERR(t) : -ENOENT;
1365 private = t->private;
1366 if (private->number != num_counters) {
1368 goto unlock_up_free;
1372 /* Choose the copy that is on our node */
1373 curcpu = smp_processor_id();
1374 addend = xt_write_recseq_begin();
1375 loc_cpu_entry = private->entries[curcpu];
1376 xt_entry_foreach(iter, loc_cpu_entry, private->size) {
1377 ADD_COUNTER(iter->counters, paddc[i].bcnt, paddc[i].pcnt);
1380 xt_write_recseq_end(addend);
1392 #ifdef CONFIG_COMPAT
1393 struct compat_ip6t_replace {
1394 char name[XT_TABLE_MAXNAMELEN];
1398 u32 hook_entry[NF_INET_NUMHOOKS];
1399 u32 underflow[NF_INET_NUMHOOKS];
1401 compat_uptr_t counters; /* struct xt_counters * */
1402 struct compat_ip6t_entry entries[0];
1406 compat_copy_entry_to_user(struct ip6t_entry *e, void __user **dstptr,
1407 unsigned int *size, struct xt_counters *counters,
1410 struct xt_entry_target *t;
1411 struct compat_ip6t_entry __user *ce;
1412 u_int16_t target_offset, next_offset;
1413 compat_uint_t origsize;
1414 const struct xt_entry_match *ematch;
1418 ce = (struct compat_ip6t_entry __user *)*dstptr;
1419 if (copy_to_user(ce, e, sizeof(struct ip6t_entry)) != 0 ||
1420 copy_to_user(&ce->counters, &counters[i],
1421 sizeof(counters[i])) != 0)
1424 *dstptr += sizeof(struct compat_ip6t_entry);
1425 *size -= sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry);
1427 xt_ematch_foreach(ematch, e) {
1428 ret = xt_compat_match_to_user(ematch, dstptr, size);
1432 target_offset = e->target_offset - (origsize - *size);
1433 t = ip6t_get_target(e);
1434 ret = xt_compat_target_to_user(t, dstptr, size);
1437 next_offset = e->next_offset - (origsize - *size);
1438 if (put_user(target_offset, &ce->target_offset) != 0 ||
1439 put_user(next_offset, &ce->next_offset) != 0)
1445 compat_find_calc_match(struct xt_entry_match *m,
1447 const struct ip6t_ip6 *ipv6,
1448 unsigned int hookmask,
1451 struct xt_match *match;
1453 match = xt_request_find_match(NFPROTO_IPV6, m->u.user.name,
1454 m->u.user.revision);
1455 if (IS_ERR(match)) {
1456 duprintf("compat_check_calc_match: `%s' not found\n",
1458 return PTR_ERR(match);
1460 m->u.kernel.match = match;
1461 *size += xt_compat_match_offset(match);
/* Drop the module references taken while checking a compat entry: one
 * per match and one for the target.  Only releases references; no
 * ->destroy hooks are invoked here (the entry was never fully checked).
 */
1465 static void compat_release_entry(struct compat_ip6t_entry *e)
1467 struct xt_entry_target *t;
1468 struct xt_entry_match *ematch;
1470 /* Cleanup all matches */
1471 xt_ematch_foreach(ematch, e)
1472 module_put(ematch->u.kernel.match->me);
1473 t = compat_ip6t_get_target(e);
1474 module_put(t->u.kernel.target->me);
/* First-pass validation of one compat entry within the user-supplied
 * blob: checks alignment and bounds, resolves match/target modules,
 * computes the per-entry size delta needed for translation to native
 * layout, and records hook entry/underflow positions in newinfo.
 */
1478 check_compat_entry_size_and_hooks(struct compat_ip6t_entry *e,
1479 struct xt_table_info *newinfo,
1481 const unsigned char *base,
1482 const unsigned char *limit,
1483 const unsigned int *hook_entries,
1484 const unsigned int *underflows,
1487 struct xt_entry_match *ematch;
1488 struct xt_entry_target *t;
1489 struct xt_target *target;
1490 unsigned int entry_offset;
1494 duprintf("check_compat_entry_size_and_hooks %p\n", e);
/* Reject misaligned entries or ones whose header would run past the
 * end of the user-supplied blob.
 */
1495 if ((unsigned long)e % __alignof__(struct compat_ip6t_entry) != 0 ||
1496 (unsigned char *)e + sizeof(struct compat_ip6t_entry) >= limit) {
1497 duprintf("Bad offset %p, limit = %p\n", e, limit);
/* An entry must at least hold its own header plus a target header. */
1501 if (e->next_offset < sizeof(struct compat_ip6t_entry) +
1502 sizeof(struct compat_xt_entry_target)) {
1503 duprintf("checking: element %p size %u\n",
1508 /* For purposes of check_entry casting the compat entry is fine */
1509 ret = check_entry((struct ip6t_entry *)e, name);
/* off accumulates how much larger the native layout will be. */
1513 off = sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry);
1514 entry_offset = (void *)e - (void *)base;
1516 xt_ematch_foreach(ematch, e) {
1517 ret = compat_find_calc_match(ematch, name,
1518 &e->ipv6, e->comefrom, &off);
1520 goto release_matches;
1524 t = compat_ip6t_get_target(e);
1525 target = xt_request_find_target(NFPROTO_IPV6, t->u.user.name,
1526 t->u.user.revision);
1527 if (IS_ERR(target)) {
1528 duprintf("check_compat_entry_size_and_hooks: `%s' not found\n",
1530 ret = PTR_ERR(target);
1531 goto release_matches;
1533 t->u.kernel.target = target;
1535 off += xt_compat_target_offset(target);
/* Remember the size delta for this entry so the second pass can map
 * compat offsets to native ones.
 */
1537 ret = xt_compat_add_offset(AF_INET6, entry_offset, off);
1541 /* Check hooks & underflows */
1542 for (h = 0; h < NF_INET_NUMHOOKS; h++) {
1543 if ((unsigned char *)e - base == hook_entries[h])
1544 newinfo->hook_entry[h] = hook_entries[h];
1545 if ((unsigned char *)e - base == underflows[h])
1546 newinfo->underflow[h] = underflows[h];
1549 /* Clear counters and comefrom */
1550 memset(&e->counters, 0, sizeof(e->counters));
/* Error unwind: release references taken above, in reverse order. */
1555 module_put(t->u.kernel.target->me);
1557 xt_ematch_foreach(ematch, e) {
1560 module_put(ematch->u.kernel.match->me);
/* Second-pass translation: copy one validated compat entry into the
 * native-layout image at *dstptr, converting each match and the target,
 * and fix up the entry's internal offsets plus newinfo's hook/underflow
 * offsets to account for the size growth.
 */
1566 compat_copy_entry_from_user(struct compat_ip6t_entry *e, void **dstptr,
1567 unsigned int *size, const char *name,
1568 struct xt_table_info *newinfo, unsigned char *base)
1570 struct xt_entry_target *t;
1571 struct ip6t_entry *de;
1572 unsigned int origsize;
1574 struct xt_entry_match *ematch;
1578 de = (struct ip6t_entry *)*dstptr;
1579 memcpy(de, e, sizeof(struct ip6t_entry));
1580 memcpy(&de->counters, &e->counters, sizeof(e->counters));
/* Native header is larger than the compat one, so *size grows. */
1582 *dstptr += sizeof(struct ip6t_entry);
1583 *size += sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry);
1585 xt_ematch_foreach(ematch, e) {
1586 ret = xt_compat_match_from_user(ematch, dstptr, size);
/* Offsets grow by the cumulative layout delta (origsize - *size is
 * negative growth here since *size increased).
 */
1590 de->target_offset = e->target_offset - (origsize - *size);
1591 t = compat_ip6t_get_target(e);
1592 xt_compat_target_from_user(t, dstptr, size);
1594 de->next_offset = e->next_offset - (origsize - *size);
/* Shift every hook entry/underflow that lies after this entry. */
1595 for (h = 0; h < NF_INET_NUMHOOKS; h++) {
1596 if ((unsigned char *)de - base < newinfo->hook_entry[h])
1597 newinfo->hook_entry[h] -= origsize - *size;
1598 if ((unsigned char *)de - base < newinfo->underflow[h])
1599 newinfo->underflow[h] -= origsize - *size;
/* Run the full ->checkentry validation on a translated (native-layout)
 * entry: every match via check_match(), then the target.  On failure,
 * unwinds only the matches that were already successfully checked.
 */
1604 static int compat_check_entry(struct ip6t_entry *e, struct net *net,
1609 struct xt_mtchk_param mtpar;
1610 struct xt_entry_match *ematch;
1615 mtpar.entryinfo = &e->ipv6;
1616 mtpar.hook_mask = e->comefrom;
1617 mtpar.family = NFPROTO_IPV6;
1618 xt_ematch_foreach(ematch, e) {
1619 ret = check_match(ematch, &mtpar);
1621 goto cleanup_matches;
1625 ret = check_target(e, net, name);
1627 goto cleanup_matches;
/* Destroy only the matches checked before the failure. */
1631 xt_ematch_foreach(ematch, e) {
1634 cleanup_match(ematch, net);
/* Convert a complete 32-bit ruleset image into a native xt_table_info.
 * Pass 1 (under the xt compat lock): validate each compat entry and
 * record per-entry size deltas.  Pass 2: copy/translate entries into a
 * freshly allocated native table, then run the full checkentry pass.
 * On success *pinfo/*pentry0 are replaced with the native table.
 */
1640 translate_compat_table(struct net *net,
1642 unsigned int valid_hooks,
1643 struct xt_table_info **pinfo,
1645 unsigned int total_size,
1646 unsigned int number,
1647 unsigned int *hook_entries,
1648 unsigned int *underflows)
1651 struct xt_table_info *newinfo, *info;
1652 void *pos, *entry0, *entry1;
1653 struct compat_ip6t_entry *iter0;
1654 struct ip6t_entry *iter1;
1661 info->number = number;
1663 /* Init all hooks to impossible value. */
1664 for (i = 0; i < NF_INET_NUMHOOKS; i++) {
1665 info->hook_entry[i] = 0xFFFFFFFF;
1666 info->underflow[i] = 0xFFFFFFFF;
1669 duprintf("translate_compat_table: size %u\n", info->size);
/* The compat offset table is global per-family; serialize access. */
1671 xt_compat_lock(AF_INET6);
1672 xt_compat_init_offsets(AF_INET6, number);
1673 /* Walk through entries, checking offsets. */
1674 xt_entry_foreach(iter0, entry0, total_size) {
1675 ret = check_compat_entry_size_and_hooks(iter0, info, &size,
1677 entry0 + total_size,
/* Userspace must have declared exactly `number` entries. */
1688 duprintf("translate_compat_table: %u not %u entries\n",
1693 /* Check hooks all assigned */
1694 for (i = 0; i < NF_INET_NUMHOOKS; i++) {
1695 /* Only hooks which are valid */
1696 if (!(valid_hooks & (1 << i)))
1698 if (info->hook_entry[i] == 0xFFFFFFFF) {
1699 duprintf("Invalid hook entry %u %u\n",
1700 i, hook_entries[i]);
1703 if (info->underflow[i] == 0xFFFFFFFF) {
1704 duprintf("Invalid underflow %u %u\n",
/* Allocate the native table sized from pass 1's computed total. */
1711 newinfo = xt_alloc_table_info(size);
1715 newinfo->number = number;
1716 for (i = 0; i < NF_INET_NUMHOOKS; i++) {
1717 newinfo->hook_entry[i] = info->hook_entry[i];
1718 newinfo->underflow[i] = info->underflow[i];
1720 entry1 = newinfo->entries[raw_smp_processor_id()];
1723 xt_entry_foreach(iter0, entry0, total_size) {
1724 ret = compat_copy_entry_from_user(iter0, &pos, &size,
1725 name, newinfo, entry1);
/* Offset table no longer needed once translation is done. */
1729 xt_compat_flush_offsets(AF_INET6);
1730 xt_compat_unlock(AF_INET6);
1735 if (!mark_source_chains(newinfo, valid_hooks, entry1))
/* Full checkentry pass on the translated (native) entries. */
1739 xt_entry_foreach(iter1, entry1, newinfo->size) {
1740 ret = compat_check_entry(iter1, net, name);
1744 if (strcmp(ip6t_get_target(iter1)->u.user.name,
1745 XT_ERROR_TARGET) == 0)
1746 ++newinfo->stacksize;
1750 * The first i matches need cleanup_entry (calls ->destroy)
1751 * because they had called ->check already. The other j-i
1752 * entries need only release.
1756 xt_entry_foreach(iter0, entry0, newinfo->size) {
1761 compat_release_entry(iter0);
1763 xt_entry_foreach(iter1, entry1, newinfo->size) {
1766 cleanup_entry(iter1, net);
1768 xt_free_table_info(newinfo);
1772 /* And one copy for every other CPU */
1773 for_each_possible_cpu(i)
1774 if (newinfo->entries[i] && newinfo->entries[i] != entry1)
1775 memcpy(newinfo->entries[i], entry1, newinfo->size);
1779 xt_free_table_info(info);
/* Error unwind: free the native table and release compat refs. */
1783 xt_free_table_info(newinfo);
1785 xt_entry_foreach(iter0, entry0, total_size) {
1788 compat_release_entry(iter0);
1792 xt_compat_flush_offsets(AF_INET6);
1793 xt_compat_unlock(AF_INET6);
/* Compat (32-bit caller) handler for IP6T_SO_SET_REPLACE: copy the
 * header and ruleset blob from userspace, translate to native layout,
 * and install via __do_replace().
 */
1798 compat_do_replace(struct net *net, void __user *user, unsigned int len)
1801 struct compat_ip6t_replace tmp;
1802 struct xt_table_info *newinfo;
1803 void *loc_cpu_entry;
1804 struct ip6t_entry *iter;
1806 if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
1809 /* overflow check */
/* Guard the per-CPU and counter-array allocations against integer
 * overflow before trusting the user-supplied sizes.
 */
1810 if (tmp.size >= INT_MAX / num_possible_cpus())
1812 if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
/* Force NUL-termination of the user-supplied table name. */
1814 tmp.name[sizeof(tmp.name)-1] = 0;
1816 newinfo = xt_alloc_table_info(tmp.size);
1820 /* choose the copy that is on our node/cpu */
1821 loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
1822 if (copy_from_user(loc_cpu_entry, user + sizeof(tmp),
1828 ret = translate_compat_table(net, tmp.name, tmp.valid_hooks,
1829 &newinfo, &loc_cpu_entry, tmp.size,
1830 tmp.num_entries, tmp.hook_entry,
1835 duprintf("compat_do_replace: Translated table\n");
1837 ret = __do_replace(net, tmp.name, tmp.valid_hooks, newinfo,
1838 tmp.num_counters, compat_ptr(tmp.counters));
1840 goto free_newinfo_untrans;
/* Translation succeeded but install failed: destroy each entry. */
1843 free_newinfo_untrans:
1844 xt_entry_foreach(iter, loc_cpu_entry, newinfo->size)
1845 cleanup_entry(iter, net);
1847 xt_free_table_info(newinfo);
/* Compat setsockopt dispatcher: requires CAP_NET_ADMIN in the socket's
 * user namespace, then routes to the compat replace / add-counters
 * handlers (do_add_counters called with compat=1).
 */
1852 compat_do_ip6t_set_ctl(struct sock *sk, int cmd, void __user *user,
1857 if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
1861 case IP6T_SO_SET_REPLACE:
1862 ret = compat_do_replace(sock_net(sk), user, len);
1865 case IP6T_SO_SET_ADD_COUNTERS:
1866 ret = do_add_counters(sock_net(sk), user, len, 1);
1870 duprintf("do_ip6t_set_ctl: unknown request %i\n", cmd);
/* 32-bit userspace layout for IP6T_SO_GET_ENTRIES: table name followed
 * by a variable-length array of compat-layout entries.
 */
1877 struct compat_ip6t_get_entries {
1878 char name[XT_TABLE_MAXNAMELEN];
1880 struct compat_ip6t_entry entrytable[0];
/* Dump all entries of a table to a 32-bit caller: snapshot the counters
 * then stream each entry through compat_copy_entry_to_user().
 */
1884 compat_copy_entries_to_user(unsigned int total_size, struct xt_table *table,
1885 void __user *userptr)
1887 struct xt_counters *counters;
1888 const struct xt_table_info *private = table->private;
1892 const void *loc_cpu_entry;
1894 struct ip6t_entry *iter;
1896 counters = alloc_counters(table);
1897 if (IS_ERR(counters))
1898 return PTR_ERR(counters);
1900 /* choose the copy that is on our node/cpu, ...
1901 * This choice is lazy (because current thread is
1902 * allowed to migrate to another cpu)
1904 loc_cpu_entry = private->entries[raw_smp_processor_id()];
/* i indexes the counters array in step with the entries. */
1907 xt_entry_foreach(iter, loc_cpu_entry, total_size) {
1908 ret = compat_copy_entry_to_user(iter, &pos,
1909 &size, counters, i++);
/* Compat handler for IP6T_SO_GET_ENTRIES: validate the request size
 * against the compat-layout table size (computed via compat_table_info)
 * before dumping entries to the 32-bit caller.
 */
1919 compat_get_entries(struct net *net, struct compat_ip6t_get_entries __user *uptr,
1923 struct compat_ip6t_get_entries get;
1926 if (*len < sizeof(get)) {
1927 duprintf("compat_get_entries: %u < %zu\n", *len, sizeof(get));
1931 if (copy_from_user(&get, uptr, sizeof(get)) != 0)
/* Caller must supply exactly header + advertised entry size. */
1934 if (*len != sizeof(struct compat_ip6t_get_entries) + get.size) {
1935 duprintf("compat_get_entries: %u != %zu\n",
1936 *len, sizeof(get) + get.size);
/* Hold the compat lock across the size computation and the dump so
 * the offset table stays consistent.
 */
1940 xt_compat_lock(AF_INET6);
1941 t = xt_find_table_lock(net, AF_INET6, get.name);
1942 if (!IS_ERR_OR_NULL(t)) {
1943 const struct xt_table_info *private = t->private;
1944 struct xt_table_info info;
1945 duprintf("t->private->number = %u\n", private->number);
1946 ret = compat_table_info(private, &info);
1947 if (!ret && get.size == info.size) {
1948 ret = compat_copy_entries_to_user(private->size,
1949 t, uptr->entrytable);
1951 duprintf("compat_get_entries: I've got %u not %u!\n",
1952 private->size, get.size);
1955 xt_compat_flush_offsets(AF_INET6);
1959 ret = t ? PTR_ERR(t) : -ENOENT;
1961 xt_compat_unlock(AF_INET6);
/* Forward declaration: the compat getter falls through to the native
 * handler for commands that need no translation.
 */
1965 static int do_ip6t_get_ctl(struct sock *, int, void __user *, int *);
/* Compat getsockopt dispatcher: CAP_NET_ADMIN required; GET_INFO runs
 * with compat=1, GET_ENTRIES uses the compat dump, everything else is
 * delegated to the native do_ip6t_get_ctl().
 */
1968 compat_do_ip6t_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
1972 if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
1976 case IP6T_SO_GET_INFO:
1977 ret = get_info(sock_net(sk), user, len, 1);
1979 case IP6T_SO_GET_ENTRIES:
1980 ret = compat_get_entries(sock_net(sk), user, len);
1983 ret = do_ip6t_get_ctl(sk, cmd, user, len);
/* Native setsockopt dispatcher: CAP_NET_ADMIN required; routes replace
 * and add-counters requests (do_add_counters called with compat=0).
 */
1990 do_ip6t_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len)
1994 if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
1998 case IP6T_SO_SET_REPLACE:
1999 ret = do_replace(sock_net(sk), user, len);
2002 case IP6T_SO_SET_ADD_COUNTERS:
2003 ret = do_add_counters(sock_net(sk), user, len, 0);
2007 duprintf("do_ip6t_set_ctl: unknown request %i\n", cmd);
/* Native getsockopt dispatcher: table info, entry dump, and extension
 * revision queries.  CAP_NET_ADMIN required for all commands.
 */
2015 do_ip6t_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
2019 if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
2023 case IP6T_SO_GET_INFO:
2024 ret = get_info(sock_net(sk), user, len, 0);
2027 case IP6T_SO_GET_ENTRIES:
2028 ret = get_entries(sock_net(sk), user, len);
2031 case IP6T_SO_GET_REVISION_MATCH:
2032 case IP6T_SO_GET_REVISION_TARGET: {
2033 struct xt_get_revision rev;
2036 if (*len != sizeof(rev)) {
2040 if (copy_from_user(&rev, user, sizeof(rev)) != 0) {
/* Force NUL-termination of the user-supplied extension name. */
2044 rev.name[sizeof(rev.name)-1] = 0;
2046 if (cmd == IP6T_SO_GET_REVISION_TARGET)
/* Retry the revision lookup after auto-loading "ip6t_<name>". */
2051 try_then_request_module(xt_find_revision(AF_INET6, rev.name,
2054 "ip6t_%s", rev.name);
2059 duprintf("do_ip6t_get_ctl: unknown request %i\n", cmd);
/* Register an ip6tables table for one network namespace: copy the
 * initial ruleset from repl, translate/validate it, and hand the table
 * to the xt core.  Returns the registered table or ERR_PTR on failure.
 */
2066 struct xt_table *ip6t_register_table(struct net *net,
2067 const struct xt_table *table,
2068 const struct ip6t_replace *repl)
2071 struct xt_table_info *newinfo;
2072 struct xt_table_info bootstrap = {0};
2073 void *loc_cpu_entry;
2074 struct xt_table *new_table;
2076 newinfo = xt_alloc_table_info(repl->size);
2082 /* choose the copy on our node/cpu, but dont care about preemption */
2083 loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
2084 memcpy(loc_cpu_entry, repl->entries, repl->size);
2086 ret = translate_table(net, newinfo, loc_cpu_entry, repl);
2090 new_table = xt_register_table(net, table, &bootstrap, newinfo);
2091 if (IS_ERR(new_table)) {
2092 ret = PTR_ERR(new_table);
/* Error path: free the table image that was never installed. */
2098 xt_free_table_info(newinfo);
2100 return ERR_PTR(ret);
/* Tear down a registered table: detach it from the xt core, run
 * cleanup_entry() on every rule (dropping match/target module refs),
 * and free the table info.  The extra module_put balances a reference
 * taken when user rules beyond the initial set were loaded.
 */
2103 void ip6t_unregister_table(struct net *net, struct xt_table *table)
2105 struct xt_table_info *private;
2106 void *loc_cpu_entry;
/* Grab the owner before xt_unregister_table invalidates the table. */
2107 struct module *table_owner = table->me;
2108 struct ip6t_entry *iter;
2110 private = xt_unregister_table(table);
2112 /* Decrease module usage counts and free resources */
2113 loc_cpu_entry = private->entries[raw_smp_processor_id()];
2114 xt_entry_foreach(iter, loc_cpu_entry, private->size)
2115 cleanup_entry(iter, net);
2116 if (private->number > private->initial_entries)
2117 module_put(table_owner);
2118 xt_free_table_info(private);
2121 /* Returns 1 if the type and code is matched by the range, 0 otherwise */
/* The result is XORed (via the trailing invert flag, see caller) so a
 * single helper serves both the normal and the inverted ICMP match.
 */
2123 icmp6_type_code_match(u_int8_t test_type, u_int8_t min_code, u_int8_t max_code,
2124 u_int8_t type, u_int8_t code,
2127 return (type == test_type && code >= min_code && code <= max_code)
/* xt_match ->match hook for the built-in "icmp6" match: pulls the
 * ICMPv6 header from the packet and compares type/code against the
 * rule's configured range, honoring the invert flag.
 */
2132 icmp6_match(const struct sk_buff *skb, struct xt_action_param *par)
2134 const struct icmp6hdr *ic;
2135 struct icmp6hdr _icmph;
2136 const struct ip6t_icmp *icmpinfo = par->matchinfo;
2138 /* Must not be a fragment. */
2139 if (par->fragoff != 0)
2142 ic = skb_header_pointer(skb, par->thoff, sizeof(_icmph), &_icmph);
2144 /* We've been asked to examine this packet, and we
2145 * can't. Hence, no choice but to drop.
2147 duprintf("Dropping evil ICMP tinygram.\n");
2148 par->hotdrop = true;
2152 return icmp6_type_code_match(icmpinfo->type,
2155 ic->icmp6_type, ic->icmp6_code,
2156 !!(icmpinfo->invflags&IP6T_ICMP_INV));
2159 /* Called when user tries to insert an entry of this type. */
/* Only IP6T_ICMP_INV is a valid invert flag; reject anything else. */
2160 static int icmp6_checkentry(const struct xt_mtchk_param *par)
2162 const struct ip6t_icmp *icmpinfo = par->matchinfo;
2164 /* Must specify no unknown invflags */
2165 return (icmpinfo->invflags & ~IP6T_ICMP_INV) ? -EINVAL : 0;
2168 /* The built-in targets: standard (NULL) and error. */
/* The standard target carries just a verdict int; the compat hooks
 * translate that int between 32- and 64-bit layouts.
 */
2169 static struct xt_target ip6t_builtin_tg[] __read_mostly = {
2171 .name = XT_STANDARD_TARGET,
2172 .targetsize = sizeof(int),
2173 .family = NFPROTO_IPV6,
2174 #ifdef CONFIG_COMPAT
2175 .compatsize = sizeof(compat_int_t),
2176 .compat_from_user = compat_standard_from_user,
2177 .compat_to_user = compat_standard_to_user,
/* ERROR target: payload is the error name, handled by ip6t_error(). */
2181 .name = XT_ERROR_TARGET,
2182 .target = ip6t_error,
2183 .targetsize = XT_FUNCTION_MAXNAMELEN,
2184 .family = NFPROTO_IPV6,
/* setsockopt/getsockopt registration for the IP6T_* control range,
 * with compat handlers wired in when CONFIG_COMPAT is enabled.
 */
2188 static struct nf_sockopt_ops ip6t_sockopts = {
2190 .set_optmin = IP6T_BASE_CTL,
2191 .set_optmax = IP6T_SO_SET_MAX+1,
2192 .set = do_ip6t_set_ctl,
2193 #ifdef CONFIG_COMPAT
2194 .compat_set = compat_do_ip6t_set_ctl,
2196 .get_optmin = IP6T_BASE_CTL,
2197 .get_optmax = IP6T_SO_GET_MAX+1,
2198 .get = do_ip6t_get_ctl,
2199 #ifdef CONFIG_COMPAT
2200 .compat_get = compat_do_ip6t_get_ctl,
2202 .owner = THIS_MODULE,
/* The built-in "icmp6" match, restricted to IPPROTO_ICMPV6 packets. */
2205 static struct xt_match ip6t_builtin_mt[] __read_mostly = {
2208 .match = icmp6_match,
2209 .matchsize = sizeof(struct ip6t_icmp),
2210 .checkentry = icmp6_checkentry,
2211 .proto = IPPROTO_ICMPV6,
2212 .family = NFPROTO_IPV6,
/* Per-netns init: set up the xt proc entries for IPv6. */
2216 static int __net_init ip6_tables_net_init(struct net *net)
2218 return xt_proto_init(net, NFPROTO_IPV6);
/* Per-netns teardown: remove the xt proc entries for IPv6. */
2221 static void __net_exit ip6_tables_net_exit(struct net *net)
2223 xt_proto_fini(net, NFPROTO_IPV6);
/* Hooks run for every network namespace create/destroy. */
2226 static struct pernet_operations ip6_tables_net_ops = {
2227 .init = ip6_tables_net_init,
2228 .exit = ip6_tables_net_exit,
/* Module init: register pernet ops, the built-in targets and matches,
 * then the sockopt interface.  Each failure unwinds everything
 * registered before it (labels below run in reverse order).
 */
2231 static int __init ip6_tables_init(void)
2235 ret = register_pernet_subsys(&ip6_tables_net_ops);
2239 /* No one else will be downing sem now, so we won't sleep */
2240 ret = xt_register_targets(ip6t_builtin_tg, ARRAY_SIZE(ip6t_builtin_tg));
2243 ret = xt_register_matches(ip6t_builtin_mt, ARRAY_SIZE(ip6t_builtin_mt));
2247 /* Register setsockopt */
2248 ret = nf_register_sockopt(&ip6t_sockopts);
2252 pr_info("(C) 2000-2006 Netfilter Core Team\n");
/* Error unwind, reverse registration order. */
2256 xt_unregister_matches(ip6t_builtin_mt, ARRAY_SIZE(ip6t_builtin_mt));
2258 xt_unregister_targets(ip6t_builtin_tg, ARRAY_SIZE(ip6t_builtin_tg));
2260 unregister_pernet_subsys(&ip6_tables_net_ops);
/* Module exit: unregister in strict reverse order of ip6_tables_init. */
2265 static void __exit ip6_tables_fini(void)
2267 nf_unregister_sockopt(&ip6t_sockopts);
2269 xt_unregister_matches(ip6t_builtin_mt, ARRAY_SIZE(ip6t_builtin_mt));
2270 xt_unregister_targets(ip6t_builtin_tg, ARRAY_SIZE(ip6t_builtin_tg));
2271 unregister_pernet_subsys(&ip6_tables_net_ops);
/* Entry points used by the per-table modules (ip6table_filter etc.). */
2274 EXPORT_SYMBOL(ip6t_register_table);
2275 EXPORT_SYMBOL(ip6t_unregister_table);
2276 EXPORT_SYMBOL(ip6t_do_table);
2278 module_init(ip6_tables_init);
2279 module_exit(ip6_tables_fini);