/* This is a module which is used for logging packets to userspace via
 * nfnetlink.
 *
 * (C) 2005 by Harald Welte <laforge@netfilter.org>
 * (C) 2006-2012 Patrick McHardy <kaber@trash.net>
 *
 * Based on the old ipv4-only ipt_ULOG.c:
 * (C) 2000-2004 by Harald Welte <laforge@netfilter.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
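/*
 * Hedged usage sketch (not part of this module): userspace normally consumes
 * these log messages through the libnetfilter_log library rather than by
 * speaking raw nfnetlink.  The calls below reflect that library's documented
 * API as best understood here and are shown only to illustrate the
 * kernel/userspace split; treat the names and signatures as assumptions and
 * check the library's own documentation.
 *
 *	#include <stdio.h>
 *	#include <sys/socket.h>
 *	#include <libnetfilter_log/libnetfilter_log.h>
 *
 *	// called once per NFULNL_MSG_PACKET message
 *	static int cb(struct nflog_g_handle *gh, struct nfgenmsg *nfmsg,
 *		      struct nflog_data *nfa, void *data)
 *	{
 *		char *payload;
 *		int len = nflog_get_payload(nfa, &payload);
 *
 *		printf("packet: %d bytes, prefix '%s'\n",
 *		       len, nflog_get_prefix(nfa));
 *		return 0;
 *	}
 *
 *	int main(void)
 *	{
 *		char buf[4096];
 *		int rv;
 *		struct nflog_handle *h = nflog_open();
 *		struct nflog_g_handle *gh;
 *
 *		nflog_bind_pf(h, AF_INET);	// NFULNL_CFG_CMD_PF_BIND
 *		gh = nflog_bind_group(h, 0);	// NFULNL_CFG_CMD_BIND, group 0
 *		nflog_set_mode(gh, NFULNL_COPY_PACKET, 0xffff);
 *		nflog_callback_register(gh, &cb, NULL);
 *		while ((rv = recv(nflog_fd(h), buf, sizeof(buf), 0)) >= 0)
 *			nflog_handle_packet(h, buf, rv);
 *		nflog_close(h);
 *		return 0;
 *	}
 */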
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/if_arp.h>
#include <linux/init.h>
#include <linux/ipv6.h>
#include <linux/netdevice.h>
#include <linux/netfilter.h>
#include <net/netlink.h>
#include <linux/netfilter/nfnetlink.h>
#include <linux/netfilter/nfnetlink_log.h>
#include <linux/spinlock.h>
#include <linux/sysctl.h>
#include <linux/proc_fs.h>
#include <linux/security.h>
#include <linux/list.h>
#include <linux/jhash.h>
#include <linux/random.h>
#include <linux/slab.h>

#include <net/netfilter/nf_log.h>
#include <net/netns/generic.h>
#include <net/netfilter/nfnetlink_log.h>

#include <linux/atomic.h>
#ifdef CONFIG_BRIDGE_NETFILTER
#include "../bridge/br_private.h"
#endif
#define NFULNL_NLBUFSIZ_DEFAULT	NLMSG_GOODSIZE
#define NFULNL_TIMEOUT_DEFAULT	100	/* every second */
#define NFULNL_QTHRESH_DEFAULT	100	/* 100 packets */
#define NFULNL_COPY_RANGE_MAX	0xFFFF	/* max packet size is limited by 16-bit struct nfattr nfa_len field */
#define PRINTR(x, args...)	do { if (net_ratelimit()) \
				     printk(x, ## args); } while (0)
struct nfulnl_instance {
	struct hlist_node hlist;	/* global list of instances */

	atomic_t use;			/* use count */

	unsigned int qlen;		/* number of nlmsgs in skb */
	struct sk_buff *skb;		/* pre-allocated skb */
	struct timer_list timer;

	struct user_namespace *peer_user_ns;	/* User namespace of the peer process */
	int peer_portid;		/* PORTID of the peer process */

	/* configurable parameters */
	unsigned int flushtimeout;	/* timeout until queue flush */
	unsigned int nlbufsiz;		/* netlink buffer allocation size */
	unsigned int qthreshold;	/* threshold of the queue */

	u_int32_t seq;			/* instance-local sequential counter */
	u_int16_t group_num;		/* number of this queue */

#define INSTANCE_BUCKETS	16
static unsigned int hash_init;

static int nfnl_log_net_id __read_mostly;

	spinlock_t instances_lock;
	struct hlist_head instance_table[INSTANCE_BUCKETS];

static struct nfnl_log_net *nfnl_log_pernet(struct net *net)
	return net_generic(net, nfnl_log_net_id);
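/* map a group number onto one of INSTANCE_BUCKETS hash buckets */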
static inline u_int8_t instance_hashfn(u_int16_t group_num)
	return ((group_num & 0xff) % INSTANCE_BUCKETS);

static struct nfulnl_instance *
__instance_lookup(struct nfnl_log_net *log, u_int16_t group_num)
	struct hlist_head *head;
	struct nfulnl_instance *inst;

	head = &log->instance_table[instance_hashfn(group_num)];
	hlist_for_each_entry_rcu(inst, head, hlist) {
		if (inst->group_num == group_num)

instance_get(struct nfulnl_instance *inst)
	atomic_inc(&inst->use);
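/* look up an instance by group number and take a reference; runs under
 * rcu_read_lock_bh(), and atomic_inc_not_zero() keeps us off instances
 * that are already being torn down */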
static struct nfulnl_instance *
instance_lookup_get(struct nfnl_log_net *log, u_int16_t group_num)
	struct nfulnl_instance *inst;

	inst = __instance_lookup(log, group_num);
	if (inst && !atomic_inc_not_zero(&inst->use))
	rcu_read_unlock_bh();

static void nfulnl_instance_free_rcu(struct rcu_head *head)
	struct nfulnl_instance *inst =
		container_of(head, struct nfulnl_instance, rcu);

	module_put(THIS_MODULE);

instance_put(struct nfulnl_instance *inst)
	if (inst && atomic_dec_and_test(&inst->use))
		call_rcu_bh(&inst->rcu, nfulnl_instance_free_rcu);

static void nfulnl_timer(unsigned long data);
static struct nfulnl_instance *
instance_create(struct net *net, u_int16_t group_num,
		int portid, struct user_namespace *user_ns)
	struct nfulnl_instance *inst;
	struct nfnl_log_net *log = nfnl_log_pernet(net);

	spin_lock_bh(&log->instances_lock);
	if (__instance_lookup(log, group_num)) {

	inst = kzalloc(sizeof(*inst), GFP_ATOMIC);

	if (!try_module_get(THIS_MODULE)) {

	INIT_HLIST_NODE(&inst->hlist);
	spin_lock_init(&inst->lock);
	/* needs to be two, since we _put() after creation */
	atomic_set(&inst->use, 2);

	setup_timer(&inst->timer, nfulnl_timer, (unsigned long)inst);

	inst->net = get_net(net);
	inst->peer_user_ns = user_ns;
	inst->peer_portid = portid;
	inst->group_num = group_num;

	inst->qthreshold	= NFULNL_QTHRESH_DEFAULT;
	inst->flushtimeout	= NFULNL_TIMEOUT_DEFAULT;
	inst->nlbufsiz		= NFULNL_NLBUFSIZ_DEFAULT;
	inst->copy_mode		= NFULNL_COPY_PACKET;
	inst->copy_range	= NFULNL_COPY_RANGE_MAX;

	hlist_add_head_rcu(&inst->hlist,
			   &log->instance_table[instance_hashfn(group_num)]);

	spin_unlock_bh(&log->instances_lock);

	spin_unlock_bh(&log->instances_lock);
static void __nfulnl_flush(struct nfulnl_instance *inst);

/* called with BH disabled */
__instance_destroy(struct nfulnl_instance *inst)
	/* first pull it out of the global list */
	hlist_del_rcu(&inst->hlist);

	/* then flush all pending packets from skb */

	spin_lock(&inst->lock);

	/* lockless readers won't be able to use us */
	inst->copy_mode = NFULNL_COPY_DISABLED;

	__nfulnl_flush(inst);
	spin_unlock(&inst->lock);

	/* and finally put the refcount */

instance_destroy(struct nfnl_log_net *log,
		 struct nfulnl_instance *inst)
	spin_lock_bh(&log->instances_lock);
	__instance_destroy(inst);
	spin_unlock_bh(&log->instances_lock);
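/* the nfulnl_set_*() helpers below adjust one instance's parameters; each
 * takes inst->lock so it cannot race with packets being logged to the same
 * instance */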
nfulnl_set_mode(struct nfulnl_instance *inst, u_int8_t mode,
	spin_lock_bh(&inst->lock);

	case NFULNL_COPY_NONE:
	case NFULNL_COPY_META:
		inst->copy_mode = mode;
		inst->copy_range = 0;

	case NFULNL_COPY_PACKET:
		inst->copy_mode = mode;
		inst->copy_range = min_t(unsigned int,
					 range, NFULNL_COPY_RANGE_MAX);

	spin_unlock_bh(&inst->lock);

nfulnl_set_nlbufsiz(struct nfulnl_instance *inst, u_int32_t nlbufsiz)
	spin_lock_bh(&inst->lock);
	if (nlbufsiz < NFULNL_NLBUFSIZ_DEFAULT)
	else if (nlbufsiz > 131072)

		inst->nlbufsiz = nlbufsiz;

	spin_unlock_bh(&inst->lock);

nfulnl_set_timeout(struct nfulnl_instance *inst, u_int32_t timeout)
	spin_lock_bh(&inst->lock);
	inst->flushtimeout = timeout;
	spin_unlock_bh(&inst->lock);

nfulnl_set_qthresh(struct nfulnl_instance *inst, u_int32_t qthresh)
	spin_lock_bh(&inst->lock);
	inst->qthreshold = qthresh;
	spin_unlock_bh(&inst->lock);

nfulnl_set_flags(struct nfulnl_instance *inst, u_int16_t flags)
	spin_lock_bh(&inst->lock);

	spin_unlock_bh(&inst->lock);
static struct sk_buff *
nfulnl_alloc_skb(unsigned int inst_size, unsigned int pkt_size)
	/* alloc skb which should be big enough for a whole multipart
	 * message.  WARNING: has to be <= 128k due to slab restrictions */

	n = max(inst_size, pkt_size);
	skb = alloc_skb(n, GFP_ATOMIC);

		/* try to allocate only as much as we need for current
		 * packet */
		skb = alloc_skb(pkt_size, GFP_ATOMIC);
			pr_err("nfnetlink_log: can't even alloc %u bytes\n",
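/* push whatever has been batched in inst->skb out to the peer process;
 * when several messages are queued they go out as one multipart batch
 * via nfnetlink_unicast() to inst->peer_portid */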
__nfulnl_send(struct nfulnl_instance *inst)
	if (inst->qlen > 1) {
		struct nlmsghdr *nlh = nlmsg_put(inst->skb, 0, 0,
						 sizeof(struct nfgenmsg),

	status = nfnetlink_unicast(inst->skb, inst->net, inst->peer_portid,

__nfulnl_flush(struct nfulnl_instance *inst)
	/* timer holds a reference */
	if (del_timer(&inst->timer))

nfulnl_timer(unsigned long data)
	struct nfulnl_instance *inst = (struct nfulnl_instance *)data;

	spin_lock_bh(&inst->lock);

	spin_unlock_bh(&inst->lock);
/* This is an inline function, we don't really care about a long
 * list of arguments */
__build_packet_message(struct nfnl_log_net *log,
		       struct nfulnl_instance *inst,
		       const struct sk_buff *skb,
		       unsigned int data_len,
		       unsigned int hooknum,
		       const struct net_device *indev,
		       const struct net_device *outdev,
		       const char *prefix, unsigned int plen)
	struct nfulnl_msg_packet_hdr pmsg;
	struct nlmsghdr *nlh;
	struct nfgenmsg *nfmsg;
	sk_buff_data_t old_tail = inst->skb->tail;
	const unsigned char *hwhdrp;

	nlh = nlmsg_put(inst->skb, 0, 0,
			NFNL_SUBSYS_ULOG << 8 | NFULNL_MSG_PACKET,
			sizeof(struct nfgenmsg), 0);

	nfmsg = nlmsg_data(nlh);
	nfmsg->nfgen_family = pf;
	nfmsg->version = NFNETLINK_V0;
	nfmsg->res_id = htons(inst->group_num);

	pmsg.hw_protocol	= skb->protocol;

	if (nla_put(inst->skb, NFULA_PACKET_HDR, sizeof(pmsg), &pmsg))
		goto nla_put_failure;

	    nla_put(inst->skb, NFULA_PREFIX, plen, prefix))
		goto nla_put_failure;
#ifndef CONFIG_BRIDGE_NETFILTER
		if (nla_put_be32(inst->skb, NFULA_IFINDEX_INDEV,
				 htonl(indev->ifindex)))
			goto nla_put_failure;
		if (pf == PF_BRIDGE) {
			/* Case 1: indev is the physical input device, we need
			 * to look for the bridge group (when called from
			 * netfilter_bridge) */
			if (nla_put_be32(inst->skb, NFULA_IFINDEX_PHYSINDEV,
					 htonl(indev->ifindex)) ||
			/* this is the bridge group "brX" */
			/* rcu_read_lock()ed by nf_hook_slow or nf_log_packet */
			    nla_put_be32(inst->skb, NFULA_IFINDEX_INDEV,
					 htonl(br_port_get_rcu(indev)->br->dev->ifindex)))
				goto nla_put_failure;
			/* Case 2: indev is a bridge group, we need to look for
			 * the physical device (when called from ipv4) */
			if (nla_put_be32(inst->skb, NFULA_IFINDEX_INDEV,
					 htonl(indev->ifindex)))
				goto nla_put_failure;
			if (skb->nf_bridge && skb->nf_bridge->physindev &&
			    nla_put_be32(inst->skb, NFULA_IFINDEX_PHYSINDEV,
					 htonl(skb->nf_bridge->physindev->ifindex)))
				goto nla_put_failure;

#ifndef CONFIG_BRIDGE_NETFILTER
		if (nla_put_be32(inst->skb, NFULA_IFINDEX_OUTDEV,
				 htonl(outdev->ifindex)))
			goto nla_put_failure;
		if (pf == PF_BRIDGE) {
			/* Case 1: outdev is the physical output device, we
			 * need to look for the bridge group (when called from
			 * netfilter_bridge) */
			if (nla_put_be32(inst->skb, NFULA_IFINDEX_PHYSOUTDEV,
					 htonl(outdev->ifindex)) ||
			/* this is the bridge group "brX" */
			/* rcu_read_lock()ed by nf_hook_slow or nf_log_packet */
			    nla_put_be32(inst->skb, NFULA_IFINDEX_OUTDEV,
					 htonl(br_port_get_rcu(outdev)->br->dev->ifindex)))
				goto nla_put_failure;
			/* Case 2: outdev is a bridge group, we need to look
			 * for the physical device (when called from ipv4) */
			if (nla_put_be32(inst->skb, NFULA_IFINDEX_OUTDEV,
					 htonl(outdev->ifindex)))
				goto nla_put_failure;
			if (skb->nf_bridge && skb->nf_bridge->physoutdev &&
			    nla_put_be32(inst->skb, NFULA_IFINDEX_PHYSOUTDEV,
					 htonl(skb->nf_bridge->physoutdev->ifindex)))
				goto nla_put_failure;
	    nla_put_be32(inst->skb, NFULA_MARK, htonl(skb->mark)))
		goto nla_put_failure;

	if (indev && skb->dev &&
	    skb->mac_header != skb->network_header) {
		struct nfulnl_msg_packet_hw phw;
		int len = dev_parse_header(skb, phw.hw_addr);

		phw.hw_addrlen = htons(len);
		if (nla_put(inst->skb, NFULA_HWADDR, sizeof(phw), &phw))
			goto nla_put_failure;

	if (indev && skb_mac_header_was_set(skb)) {
		if (nla_put_be16(inst->skb, NFULA_HWTYPE, htons(skb->dev->type)) ||
		    nla_put_be16(inst->skb, NFULA_HWLEN,
				 htons(skb->dev->hard_header_len)))
			goto nla_put_failure;

		hwhdrp = skb_mac_header(skb);

		if (skb->dev->type == ARPHRD_SIT)

		if (hwhdrp >= skb->head &&
		    nla_put(inst->skb, NFULA_HWHEADER,
			    skb->dev->hard_header_len, hwhdrp))
			goto nla_put_failure;

	if (skb->tstamp.tv64) {
		struct nfulnl_msg_packet_timestamp ts;
		struct timeval tv = ktime_to_timeval(skb->tstamp);
		ts.sec = cpu_to_be64(tv.tv_sec);
		ts.usec = cpu_to_be64(tv.tv_usec);

		if (nla_put(inst->skb, NFULA_TIMESTAMP, sizeof(ts), &ts))
			goto nla_put_failure;

	if (sk && sk->sk_state != TCP_TIME_WAIT) {
		read_lock_bh(&sk->sk_callback_lock);
		if (sk->sk_socket && sk->sk_socket->file) {
			struct file *file = sk->sk_socket->file;
			const struct cred *cred = file->f_cred;
			struct user_namespace *user_ns = inst->peer_user_ns;
			__be32 uid = htonl(from_kuid_munged(user_ns, cred->fsuid));
			__be32 gid = htonl(from_kgid_munged(user_ns, cred->fsgid));
			read_unlock_bh(&sk->sk_callback_lock);
			if (nla_put_be32(inst->skb, NFULA_UID, uid) ||
			    nla_put_be32(inst->skb, NFULA_GID, gid))
				goto nla_put_failure;

			read_unlock_bh(&sk->sk_callback_lock);

	/* local sequence number */
	if ((inst->flags & NFULNL_CFG_F_SEQ) &&
	    nla_put_be32(inst->skb, NFULA_SEQ, htonl(inst->seq++)))
		goto nla_put_failure;

	/* global sequence number */
	if ((inst->flags & NFULNL_CFG_F_SEQ_GLOBAL) &&
	    nla_put_be32(inst->skb, NFULA_SEQ_GLOBAL,
			 htonl(atomic_inc_return(&log->global_seq))))
		goto nla_put_failure;

		int size = nla_attr_size(data_len);

		if (skb_tailroom(inst->skb) < nla_total_size(data_len)) {
			printk(KERN_WARNING "nfnetlink_log: no tailroom!\n");

		nla = (struct nlattr *)skb_put(inst->skb, nla_total_size(data_len));
		nla->nla_type = NFULA_PAYLOAD;

		if (skb_copy_bits(skb, 0, nla_data(nla), data_len))

	nlh->nlmsg_len = inst->skb->tail - old_tail;

	PRINTR(KERN_ERR "nfnetlink_log: error creating log nlmsg\n");
#define RCV_SKB_FAIL(err)	do { netlink_ack(skb, nlh, (err)); return; } while (0)

static struct nf_loginfo default_loginfo = {
	.type = NF_LOG_TYPE_ULOG,
/* log handler for internal netfilter logging api */
nfulnl_log_packet(u_int8_t pf,
		  unsigned int hooknum,
		  const struct sk_buff *skb,
		  const struct net_device *in,
		  const struct net_device *out,
		  const struct nf_loginfo *li_user,
	unsigned int size, data_len;
	struct nfulnl_instance *inst;
	const struct nf_loginfo *li;
	unsigned int qthreshold;

	struct net *net = dev_net(in ? in : out);
	struct nfnl_log_net *log = nfnl_log_pernet(net);

	if (li_user && li_user->type == NF_LOG_TYPE_ULOG)

		li = &default_loginfo;

	inst = instance_lookup_get(log, li->u.ulog.group);

		plen = strlen(prefix) + 1;

	/* FIXME: do we want to make the size calculation conditional based on
	 * what is actually present? way more branches and checks, but more
	 * memory efficient... */
	size = nlmsg_total_size(sizeof(struct nfgenmsg))
		+ nla_total_size(sizeof(struct nfulnl_msg_packet_hdr))
		+ nla_total_size(sizeof(u_int32_t))	/* ifindex */
		+ nla_total_size(sizeof(u_int32_t))	/* ifindex */
#ifdef CONFIG_BRIDGE_NETFILTER
		+ nla_total_size(sizeof(u_int32_t))	/* ifindex */
		+ nla_total_size(sizeof(u_int32_t))	/* ifindex */
		+ nla_total_size(sizeof(u_int32_t))	/* mark */
		+ nla_total_size(sizeof(u_int32_t))	/* uid */
		+ nla_total_size(sizeof(u_int32_t))	/* gid */
		+ nla_total_size(plen)			/* prefix */
		+ nla_total_size(sizeof(struct nfulnl_msg_packet_hw))
		+ nla_total_size(sizeof(struct nfulnl_msg_packet_timestamp));

	if (in && skb_mac_header_was_set(skb)) {
		size += nla_total_size(skb->dev->hard_header_len)
			+ nla_total_size(sizeof(u_int16_t))	/* hwtype */
			+ nla_total_size(sizeof(u_int16_t));	/* hwlen */
	spin_lock_bh(&inst->lock);

	if (inst->flags & NFULNL_CFG_F_SEQ)
		size += nla_total_size(sizeof(u_int32_t));
	if (inst->flags & NFULNL_CFG_F_SEQ_GLOBAL)
		size += nla_total_size(sizeof(u_int32_t));

	qthreshold = inst->qthreshold;
	/* per-rule qthreshold overrides per-instance */
	if (li->u.ulog.qthreshold)
		if (qthreshold > li->u.ulog.qthreshold)
			qthreshold = li->u.ulog.qthreshold;

	switch (inst->copy_mode) {
	case NFULNL_COPY_META:
	case NFULNL_COPY_NONE:

	case NFULNL_COPY_PACKET:
		if (inst->copy_range == 0
		    || inst->copy_range > skb->len)

			data_len = inst->copy_range;

		size += nla_total_size(data_len);

	case NFULNL_COPY_DISABLED:
		goto unlock_and_release;

	    size > skb_tailroom(inst->skb) - sizeof(struct nfgenmsg)) {
		/* either the queue len is too high or we don't have
		 * enough room in the skb left. flush to userspace. */
		__nfulnl_flush(inst);

		inst->skb = nfulnl_alloc_skb(inst->nlbufsiz, size);

	__build_packet_message(log, inst, skb, data_len, pf,
			       hooknum, in, out, prefix, plen);

	if (inst->qlen >= qthreshold)
		__nfulnl_flush(inst);
	/* timer_pending always called within inst->lock, so there
	 * is no chance of a race here */
	else if (!timer_pending(&inst->timer)) {
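		/* flushtimeout is expressed in 1/100ths of a second (see
		 * NFULNL_TIMEOUT_DEFAULT), hence the HZ/100 scaling below */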
		inst->timer.expires = jiffies + (inst->flushtimeout*HZ/100);
		add_timer(&inst->timer);

	spin_unlock_bh(&inst->lock);

	/* FIXME: statistics */
	goto unlock_and_release;

EXPORT_SYMBOL_GPL(nfulnl_log_packet);
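/* netlink notifier: when a peer's NETLINK_NETFILTER socket goes away
 * (NETLINK_URELEASE), tear down every instance bound to that portid */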
nfulnl_rcv_nl_event(struct notifier_block *this,
		    unsigned long event, void *ptr)
	struct netlink_notify *n = ptr;
	struct nfnl_log_net *log = nfnl_log_pernet(n->net);

	if (event == NETLINK_URELEASE && n->protocol == NETLINK_NETFILTER) {

		/* destroy all instances for this portid */
		spin_lock_bh(&log->instances_lock);
		for (i = 0; i < INSTANCE_BUCKETS; i++) {
			struct hlist_node *t2;
			struct nfulnl_instance *inst;
			struct hlist_head *head = &log->instance_table[i];

			hlist_for_each_entry_safe(inst, t2, head, hlist) {
				if (n->portid == inst->peer_portid)
					__instance_destroy(inst);

		spin_unlock_bh(&log->instances_lock);

static struct notifier_block nfulnl_rtnl_notifier = {
	.notifier_call	= nfulnl_rcv_nl_event,

nfulnl_recv_unsupp(struct sock *ctnl, struct sk_buff *skb,
		   const struct nlmsghdr *nlh,
		   const struct nlattr * const nfqa[])

static struct nf_logger nfulnl_logger __read_mostly = {
	.name	= "nfnetlink_log",
	.logfn	= &nfulnl_log_packet,
static const struct nla_policy nfula_cfg_policy[NFULA_CFG_MAX+1] = {
	[NFULA_CFG_CMD]		= { .len = sizeof(struct nfulnl_msg_config_cmd) },
	[NFULA_CFG_MODE]	= { .len = sizeof(struct nfulnl_msg_config_mode) },
	[NFULA_CFG_TIMEOUT]	= { .type = NLA_U32 },
	[NFULA_CFG_QTHRESH]	= { .type = NLA_U32 },
	[NFULA_CFG_NLBUFSIZ]	= { .type = NLA_U32 },
	[NFULA_CFG_FLAGS]	= { .type = NLA_U16 },
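/* NFULNL_MSG_CONFIG handler: binds/unbinds log groups and protocol families
 * and applies the NFULA_CFG_* parameters to the addressed instance */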
nfulnl_recv_config(struct sock *ctnl, struct sk_buff *skb,
		   const struct nlmsghdr *nlh,
		   const struct nlattr * const nfula[])
	struct nfgenmsg *nfmsg = nlmsg_data(nlh);
	u_int16_t group_num = ntohs(nfmsg->res_id);
	struct nfulnl_instance *inst;
	struct nfulnl_msg_config_cmd *cmd = NULL;
	struct net *net = sock_net(ctnl);
	struct nfnl_log_net *log = nfnl_log_pernet(net);

	if (nfula[NFULA_CFG_CMD]) {
		u_int8_t pf = nfmsg->nfgen_family;
		cmd = nla_data(nfula[NFULA_CFG_CMD]);

		/* Commands without queue context */
		switch (cmd->command) {
		case NFULNL_CFG_CMD_PF_BIND:
			return nf_log_bind_pf(net, pf, &nfulnl_logger);
		case NFULNL_CFG_CMD_PF_UNBIND:
			nf_log_unbind_pf(net, pf);

	inst = instance_lookup_get(log, group_num);
	if (inst && inst->peer_portid != NETLINK_CB(skb).portid) {

		switch (cmd->command) {
		case NFULNL_CFG_CMD_BIND:

			inst = instance_create(net, group_num,
					       NETLINK_CB(skb).portid,
					       sk_user_ns(NETLINK_CB(skb).ssk));

		case NFULNL_CFG_CMD_UNBIND:

			instance_destroy(log, inst);

	if (nfula[NFULA_CFG_MODE]) {
		struct nfulnl_msg_config_mode *params;
		params = nla_data(nfula[NFULA_CFG_MODE]);

		nfulnl_set_mode(inst, params->copy_mode,
				ntohl(params->copy_range));

	if (nfula[NFULA_CFG_TIMEOUT]) {
		__be32 timeout = nla_get_be32(nfula[NFULA_CFG_TIMEOUT]);

		nfulnl_set_timeout(inst, ntohl(timeout));

	if (nfula[NFULA_CFG_NLBUFSIZ]) {
		__be32 nlbufsiz = nla_get_be32(nfula[NFULA_CFG_NLBUFSIZ]);

		nfulnl_set_nlbufsiz(inst, ntohl(nlbufsiz));

	if (nfula[NFULA_CFG_QTHRESH]) {
		__be32 qthresh = nla_get_be32(nfula[NFULA_CFG_QTHRESH]);

		nfulnl_set_qthresh(inst, ntohl(qthresh));

	if (nfula[NFULA_CFG_FLAGS]) {
		__be16 flags = nla_get_be16(nfula[NFULA_CFG_FLAGS]);

		nfulnl_set_flags(inst, ntohs(flags));
static const struct nfnl_callback nfulnl_cb[NFULNL_MSG_MAX] = {
	[NFULNL_MSG_PACKET]	= { .call = nfulnl_recv_unsupp,
				    .attr_count = NFULA_MAX, },
	[NFULNL_MSG_CONFIG]	= { .call = nfulnl_recv_config,
				    .attr_count = NFULA_CFG_MAX,
				    .policy = nfula_cfg_policy },

static const struct nfnetlink_subsystem nfulnl_subsys = {
	.subsys_id	= NFNL_SUBSYS_ULOG,
	.cb_count	= NFULNL_MSG_MAX,
#ifdef CONFIG_PROC_FS
	struct seq_net_private p;
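/* /proc/net/netfilter/nfnetlink_log: seq_file iterator over the instance
 * hash table, walked under rcu_read_lock_bh() */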
static struct hlist_node *get_first(struct net *net, struct iter_state *st)
	struct nfnl_log_net *log;

	log = nfnl_log_pernet(net);

	for (st->bucket = 0; st->bucket < INSTANCE_BUCKETS; st->bucket++) {
		struct hlist_head *head = &log->instance_table[st->bucket];

		if (!hlist_empty(head))
			return rcu_dereference_bh(hlist_first_rcu(head));

static struct hlist_node *get_next(struct net *net, struct iter_state *st,
				   struct hlist_node *h)
	h = rcu_dereference_bh(hlist_next_rcu(h));
		struct nfnl_log_net *log;
		struct hlist_head *head;

		if (++st->bucket >= INSTANCE_BUCKETS)

		log = nfnl_log_pernet(net);
		head = &log->instance_table[st->bucket];
		h = rcu_dereference_bh(hlist_first_rcu(head));

static struct hlist_node *get_idx(struct net *net, struct iter_state *st,
	struct hlist_node *head;
	head = get_first(net, st);

	while (pos && (head = get_next(net, st, head)))

	return pos ? NULL : head;

static void *seq_start(struct seq_file *s, loff_t *pos)
	return get_idx(seq_file_net(s), s->private, *pos);

static void *seq_next(struct seq_file *s, void *v, loff_t *pos)
	return get_next(seq_file_net(s), s->private, v);

static void seq_stop(struct seq_file *s, void *v)
	rcu_read_unlock_bh();
static int seq_show(struct seq_file *s, void *v)
	const struct nfulnl_instance *inst = v;

	return seq_printf(s, "%5d %6d %5d %1d %5d %6d %2d\n",
			  inst->peer_portid, inst->qlen,
			  inst->copy_mode, inst->copy_range,
			  inst->flushtimeout, atomic_read(&inst->use));

static const struct seq_operations nful_seq_ops = {

static int nful_open(struct inode *inode, struct file *file)
	return seq_open_net(inode, file, &nful_seq_ops,
			    sizeof(struct iter_state));

static const struct file_operations nful_file_ops = {
	.owner	 = THIS_MODULE,
	.llseek	 = seq_lseek,
	.release = seq_release_net,

#endif /* PROC_FS */
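/* per-netns setup: initialise the instance table and its lock and, with
 * CONFIG_PROC_FS, publish /proc/net/netfilter/nfnetlink_log */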
static int __net_init nfnl_log_net_init(struct net *net)
	struct nfnl_log_net *log = nfnl_log_pernet(net);

	for (i = 0; i < INSTANCE_BUCKETS; i++)
		INIT_HLIST_HEAD(&log->instance_table[i]);
	spin_lock_init(&log->instances_lock);

#ifdef CONFIG_PROC_FS
	if (!proc_create("nfnetlink_log", 0440,
			 net->nf.proc_netfilter, &nful_file_ops))

static void __net_exit nfnl_log_net_exit(struct net *net)
	remove_proc_entry("nfnetlink_log", net->nf.proc_netfilter);

static struct pernet_operations nfnl_log_net_ops = {
	.init	= nfnl_log_net_init,
	.exit	= nfnl_log_net_exit,
	.id	= &nfnl_log_net_id,
	.size	= sizeof(struct nfnl_log_net),
static int __init nfnetlink_log_init(void)
	int status = -ENOMEM;

	/* it's not really all that important to have a random value, so
	 * we can do this from the init function, even if there hasn't
	 * been that much entropy yet */
	get_random_bytes(&hash_init, sizeof(hash_init));

	netlink_register_notifier(&nfulnl_rtnl_notifier);
	status = nfnetlink_subsys_register(&nfulnl_subsys);
		pr_err("log: failed to create netlink socket\n");
		goto cleanup_netlink_notifier;

	status = nf_log_register(NFPROTO_UNSPEC, &nfulnl_logger);
		pr_err("log: failed to register logger\n");
		goto cleanup_subsys;

	status = register_pernet_subsys(&nfnl_log_net_ops);
		pr_err("log: failed to register pernet ops\n");
		goto cleanup_logger;

	nf_log_unregister(&nfulnl_logger);

	nfnetlink_subsys_unregister(&nfulnl_subsys);
cleanup_netlink_notifier:
	netlink_unregister_notifier(&nfulnl_rtnl_notifier);

static void __exit nfnetlink_log_fini(void)
	unregister_pernet_subsys(&nfnl_log_net_ops);
	nf_log_unregister(&nfulnl_logger);
	nfnetlink_subsys_unregister(&nfulnl_subsys);
	netlink_unregister_notifier(&nfulnl_rtnl_notifier);
MODULE_DESCRIPTION("netfilter userspace logging");
MODULE_AUTHOR("Harald Welte <laforge@netfilter.org>");
MODULE_LICENSE("GPL");
MODULE_ALIAS_NFNL_SUBSYS(NFNL_SUBSYS_ULOG);

module_init(nfnetlink_log_init);
module_exit(nfnetlink_log_fini);