/*
 * NETLINK	Kernel-user communication protocol.
 *
 * 		Authors:	Alan Cox <alan@lxorguk.ukuu.org.uk>
 * 				Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 * Tue Jun 26 14:36:48 MEST 2001 Herbert "herp" Rosmanith
 *                               added netlink_proto_exit
 * Tue Jan 22 18:32:44 BRST 2002 Arnaldo C. de Melo <acme@conectiva.com.br>
 *                               use nlk_sk, as sk->protinfo is on a diet 8)
 * Fri Jul 22 19:51:12 MEST 2005 Harald Welte <laforge@gnumonks.org>
 * 				- inc module use count of module that owns
 * 				  the kernel socket in case userspace opens
 * 				  socket of same protocol
 * 				- remove all module support, since netlink is
 * 				  mandatory if CONFIG_NET=y these days
 */
#include <linux/module.h>

#include <linux/capability.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/stat.h>
#include <linux/socket.h>
#include <linux/un.h>
#include <linux/fcntl.h>
#include <linux/termios.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <asm/uaccess.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/rtnetlink.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/notifier.h>
#include <linux/security.h>
#include <linux/jhash.h>
#include <linux/jiffies.h>
#include <linux/random.h>
#include <linux/bitops.h>
#include <linux/mm.h>
#include <linux/types.h>
#include <linux/audit.h>
#include <linux/mutex.h>
#include <linux/vmalloc.h>

#include <net/net_namespace.h>
#include <net/sock.h>
#include <net/scm.h>
#include <net/netlink.h>

#include "af_netlink.h"
struct listeners {
	struct rcu_head		rcu;
	unsigned long		masks[0];
};
/* state bits */
#define NETLINK_CONGESTED	0x0

/* flags */
#define NETLINK_KERNEL_SOCKET	0x1
#define NETLINK_RECV_PKTINFO	0x2
#define NETLINK_BROADCAST_SEND_ERROR	0x4
#define NETLINK_RECV_NO_ENOBUFS	0x8
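/*
 * These flag bits mirror the SOL_NETLINK socket options of the same name
 * (see netlink_setsockopt() below). A minimal userspace sketch, assuming a
 * route socket, that opts out of ENOBUFS congestion signalling:
 *
 *	int fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_ROUTE);
 *	int one = 1;
 *
 *	setsockopt(fd, SOL_NETLINK, NETLINK_NO_ENOBUFS, &one, sizeof(one));
 *
 * With NETLINK_RECV_NO_ENOBUFS set, receive-queue overruns only bump
 * sk_drops instead of raising sk_err = ENOBUFS.
 */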
static inline int netlink_is_kernel(struct sock *sk)
{
	return nlk_sk(sk)->flags & NETLINK_KERNEL_SOCKET;
}
struct netlink_table *nl_table;
EXPORT_SYMBOL_GPL(nl_table);

static DECLARE_WAIT_QUEUE_HEAD(nl_table_wait);

static int netlink_dump(struct sock *sk);

/* Protects netlink socket hash table mutations */
DEFINE_RWLOCK(nl_table_lock);
EXPORT_SYMBOL_GPL(nl_table_lock);
static atomic_t nl_table_users = ATOMIC_INIT(0);

#define nl_deref_protected(X) rcu_dereference_protected(X, lockdep_is_held(&nl_table_lock))

static ATOMIC_NOTIFIER_HEAD(netlink_chain);
static inline u32 netlink_group_mask(u32 group)
{
	return group ? 1 << (group - 1) : 0;
}
static inline struct hlist_head *nl_portid_hashfn(struct nl_portid_hash *hash, u32 portid)
{
	return &hash->table[jhash_1word(portid, hash->rnd) & hash->mask];
}
#ifdef CONFIG_NETLINK_MMAP
static __pure struct page *pgvec_to_page(const void *addr)
{
	if (is_vmalloc_addr(addr))
		return vmalloc_to_page(addr);
	else
		return virt_to_page(addr);
}

static void free_pg_vec(void **pg_vec, unsigned int order, unsigned int len)
{
	unsigned int i;

	for (i = 0; i < len; i++) {
		if (pg_vec[i] != NULL) {
			if (is_vmalloc_addr(pg_vec[i]))
				vfree(pg_vec[i]);
			else
				free_pages((unsigned long)pg_vec[i], order);
		}
	}
	kfree(pg_vec);
}

static void *alloc_one_pg_vec_page(unsigned long order)
{
	void *buffer;
	gfp_t gfp_flags = GFP_KERNEL | __GFP_COMP | __GFP_ZERO |
			  __GFP_NOWARN | __GFP_NORETRY;

	buffer = (void *)__get_free_pages(gfp_flags, order);
	if (buffer != NULL)
		return buffer;

	buffer = vzalloc((1 << order) * PAGE_SIZE);
	if (buffer != NULL)
		return buffer;

	gfp_flags &= ~__GFP_NORETRY;
	return (void *)__get_free_pages(gfp_flags, order);
}

static void **alloc_pg_vec(struct netlink_sock *nlk,
			   struct nl_mmap_req *req, unsigned int order)
{
	unsigned int block_nr = req->nm_block_nr;
	unsigned int i;
	void **pg_vec, *ptr;

	pg_vec = kcalloc(block_nr, sizeof(void *), GFP_KERNEL);
	if (pg_vec == NULL)
		return NULL;

	for (i = 0; i < block_nr; i++) {
		pg_vec[i] = ptr = alloc_one_pg_vec_page(order);
		if (pg_vec[i] == NULL)
			goto err1;
	}

	return pg_vec;
err1:
	free_pg_vec(pg_vec, order, block_nr);
	return NULL;
}
static int netlink_set_ring(struct sock *sk, struct nl_mmap_req *req,
			    bool closing, bool tx_ring)
{
	struct netlink_sock *nlk = nlk_sk(sk);
	struct netlink_ring *ring;
	struct sk_buff_head *queue;
	void **pg_vec = NULL;
	unsigned int order = 0;
	int err;

	ring  = tx_ring ? &nlk->tx_ring : &nlk->rx_ring;
	queue = tx_ring ? &sk->sk_write_queue : &sk->sk_receive_queue;

	if (!closing) {
		if (atomic_read(&nlk->mapped))
			return -EBUSY;
		if (atomic_read(&ring->pending))
			return -EBUSY;
	}

	if (req->nm_block_nr) {
		if (ring->pg_vec != NULL)
			return -EBUSY;

		if ((int)req->nm_block_size <= 0)
			return -EINVAL;
		if (!IS_ALIGNED(req->nm_block_size, PAGE_SIZE))
			return -EINVAL;
		if (req->nm_frame_size < NL_MMAP_HDRLEN)
			return -EINVAL;
		if (!IS_ALIGNED(req->nm_frame_size, NL_MMAP_MSG_ALIGNMENT))
			return -EINVAL;

		ring->frames_per_block = req->nm_block_size /
					 req->nm_frame_size;
		if (ring->frames_per_block == 0)
			return -EINVAL;
		if (ring->frames_per_block * req->nm_block_nr !=
		    req->nm_frame_nr)
			return -EINVAL;

		order = get_order(req->nm_block_size);
		pg_vec = alloc_pg_vec(nlk, req, order);
		if (pg_vec == NULL)
			return -ENOMEM;
	} else {
		if (req->nm_frame_nr)
			return -EINVAL;
	}

	err = -EBUSY;
	mutex_lock(&nlk->pg_vec_lock);
	if (closing || atomic_read(&nlk->mapped) == 0) {
		err = 0;
		spin_lock_bh(&queue->lock);

		ring->frame_max		= req->nm_frame_nr - 1;
		ring->head		= 0;
		ring->frame_size	= req->nm_frame_size;
		ring->pg_vec_pages	= req->nm_block_size / PAGE_SIZE;

		swap(ring->pg_vec_len, req->nm_block_nr);
		swap(ring->pg_vec_order, order);
		swap(ring->pg_vec, pg_vec);

		__skb_queue_purge(queue);
		spin_unlock_bh(&queue->lock);

		WARN_ON(atomic_read(&nlk->mapped));
	}
	mutex_unlock(&nlk->pg_vec_lock);

	if (pg_vec)
		free_pg_vec(pg_vec, order, req->nm_block_nr);
	return err;
}
static void netlink_mm_open(struct vm_area_struct *vma)
{
	struct file *file = vma->vm_file;
	struct socket *sock = file->private_data;
	struct sock *sk = sock->sk;

	if (sk)
		atomic_inc(&nlk_sk(sk)->mapped);
}

static void netlink_mm_close(struct vm_area_struct *vma)
{
	struct file *file = vma->vm_file;
	struct socket *sock = file->private_data;
	struct sock *sk = sock->sk;

	if (sk)
		atomic_dec(&nlk_sk(sk)->mapped);
}

static const struct vm_operations_struct netlink_mmap_ops = {
	.open	= netlink_mm_open,
	.close	= netlink_mm_close,
};
static int netlink_mmap(struct file *file, struct socket *sock,
			struct vm_area_struct *vma)
{
	struct sock *sk = sock->sk;
	struct netlink_sock *nlk = nlk_sk(sk);
	struct netlink_ring *ring;
	unsigned long start, size, expected;
	unsigned int i;
	int err = -EINVAL;

	if (vma->vm_pgoff)
		return -EINVAL;

	mutex_lock(&nlk->pg_vec_lock);

	expected = 0;
	for (ring = &nlk->rx_ring; ring <= &nlk->tx_ring; ring++) {
		if (ring->pg_vec == NULL)
			continue;
		expected += ring->pg_vec_len * ring->pg_vec_pages * PAGE_SIZE;
	}

	if (expected == 0)
		goto out;

	size = vma->vm_end - vma->vm_start;
	if (size != expected)
		goto out;

	start = vma->vm_start;
	for (ring = &nlk->rx_ring; ring <= &nlk->tx_ring; ring++) {
		if (ring->pg_vec == NULL)
			continue;

		for (i = 0; i < ring->pg_vec_len; i++) {
			struct page *page;
			void *kaddr = ring->pg_vec[i];
			unsigned int pg_num;

			for (pg_num = 0; pg_num < ring->pg_vec_pages; pg_num++) {
				page = pgvec_to_page(kaddr);
				err = vm_insert_page(vma, start, page);
				if (err < 0)
					goto out;
				start += PAGE_SIZE;
				kaddr += PAGE_SIZE;
			}
		}
	}

	atomic_inc(&nlk->mapped);
	vma->vm_ops = &netlink_mmap_ops;
	err = 0;
out:
	mutex_unlock(&nlk->pg_vec_lock);
	return err;
}
#else /* CONFIG_NETLINK_MMAP */
#define netlink_mmap		sock_no_mmap
#endif /* CONFIG_NETLINK_MMAP */
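/*
 * Userspace sketch of ring setup against the code above, assuming a kernel
 * built with CONFIG_NETLINK_MMAP and a <linux/netlink.h> that exposes
 * struct nl_mmap_req; the sizes are illustrative only:
 *
 *	struct nl_mmap_req req = {
 *		.nm_block_size	= 4096,
 *		.nm_block_nr	= 64,
 *		.nm_frame_size	= 2048,
 *		.nm_frame_nr	= 64 * 4096 / 2048,
 *	};
 *	setsockopt(fd, SOL_NETLINK, NETLINK_RX_RING, &req, sizeof(req));
 *	void *rx_ring = mmap(NULL, 64 * 4096, PROT_READ | PROT_WRITE,
 *			     MAP_SHARED, fd, 0);
 *
 * netlink_set_ring() enforces the invariants checked above: the block size
 * is a multiple of PAGE_SIZE, the frame size is aligned to
 * NL_MMAP_MSG_ALIGNMENT, and frames_per_block * nm_block_nr == nm_frame_nr.
 */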
static void netlink_destroy_callback(struct netlink_callback *cb)
{
	kfree_skb(cb->skb);
	kfree(cb);
}

static void netlink_consume_callback(struct netlink_callback *cb)
{
	consume_skb(cb->skb);
	kfree(cb);
}
static void netlink_skb_destructor(struct sk_buff *skb)
{
	sock_rfree(skb);
}

static void netlink_skb_set_owner_r(struct sk_buff *skb, struct sock *sk)
{
	WARN_ON(skb->sk != NULL);
	skb->sk = sk;
	skb->destructor = netlink_skb_destructor;
	atomic_add(skb->truesize, &sk->sk_rmem_alloc);
	sk_mem_charge(sk, skb->truesize);
}
static void netlink_sock_destruct(struct sock *sk)
{
	struct netlink_sock *nlk = nlk_sk(sk);

	if (nlk->cb) {
		if (nlk->cb->done)
			nlk->cb->done(nlk->cb);

		module_put(nlk->cb->module);
		netlink_destroy_callback(nlk->cb);
	}

	skb_queue_purge(&sk->sk_receive_queue);
#ifdef CONFIG_NETLINK_MMAP
	if (1) {
		struct nl_mmap_req req;

		memset(&req, 0, sizeof(req));
		if (nlk->rx_ring.pg_vec)
			netlink_set_ring(sk, &req, true, false);
		memset(&req, 0, sizeof(req));
		if (nlk->tx_ring.pg_vec)
			netlink_set_ring(sk, &req, true, true);
	}
#endif /* CONFIG_NETLINK_MMAP */

	if (!sock_flag(sk, SOCK_DEAD)) {
		printk(KERN_ERR "Freeing alive netlink socket %p\n", sk);
		return;
	}

	WARN_ON(atomic_read(&sk->sk_rmem_alloc));
	WARN_ON(atomic_read(&sk->sk_wmem_alloc));
	WARN_ON(nlk_sk(sk)->groups);
}
/* This lock without WQ_FLAG_EXCLUSIVE is good on UP and it is _very_ bad on
 * SMP. Look, when several writers sleep and reader wakes them up, all but one
 * immediately hit write lock and grab all the cpus. Exclusive sleep solves
 * this, _but_ remember, it adds useless work on UP machines.
 */

void netlink_table_grab(void)
	__acquires(nl_table_lock)
{
	might_sleep();

	write_lock_irq(&nl_table_lock);

	if (atomic_read(&nl_table_users)) {
		DECLARE_WAITQUEUE(wait, current);

		add_wait_queue_exclusive(&nl_table_wait, &wait);
		for (;;) {
			set_current_state(TASK_UNINTERRUPTIBLE);
			if (atomic_read(&nl_table_users) == 0)
				break;
			write_unlock_irq(&nl_table_lock);
			schedule();
			write_lock_irq(&nl_table_lock);
		}

		__set_current_state(TASK_RUNNING);
		remove_wait_queue(&nl_table_wait, &wait);
	}
}
void netlink_table_ungrab(void)
	__releases(nl_table_lock)
{
	write_unlock_irq(&nl_table_lock);
	wake_up(&nl_table_wait);
}
static inline void
netlink_lock_table(void)
{
	/* read_lock() synchronizes us to netlink_table_grab */

	read_lock(&nl_table_lock);
	atomic_inc(&nl_table_users);
	read_unlock(&nl_table_lock);
}
static inline void
netlink_unlock_table(void)
{
	if (atomic_dec_and_test(&nl_table_users))
		wake_up(&nl_table_wait);
}
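/*
 * Usage sketch: paths that mutate the hash table bracket the update with
 * netlink_table_grab()/netlink_table_ungrab(), while paths that only walk
 * it (and may sleep while doing so) take the counted reader side:
 *
 *	netlink_lock_table();
 *	...walk nl_table...
 *	netlink_unlock_table();
 *
 * netlink_table_grab() waits until nl_table_users drops to zero, so a
 * writer never runs while such a walk is in flight.
 */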
static struct sock *netlink_lookup(struct net *net, int protocol, u32 portid)
{
	struct nl_portid_hash *hash = &nl_table[protocol].hash;
	struct hlist_head *head;
	struct sock *sk;

	read_lock(&nl_table_lock);
	head = nl_portid_hashfn(hash, portid);
	sk_for_each(sk, head) {
		if (net_eq(sock_net(sk), net) && (nlk_sk(sk)->portid == portid)) {
			sock_hold(sk);
			goto found;
		}
	}
	sk = NULL;
found:
	read_unlock(&nl_table_lock);
	return sk;
}
static struct hlist_head *nl_portid_hash_zalloc(size_t size)
{
	if (size <= PAGE_SIZE)
		return kzalloc(size, GFP_ATOMIC);
	else
		return (struct hlist_head *)
			__get_free_pages(GFP_ATOMIC | __GFP_ZERO,
					 get_order(size));
}

static void nl_portid_hash_free(struct hlist_head *table, size_t size)
{
	if (size <= PAGE_SIZE)
		kfree(table);
	else
		free_pages((unsigned long)table, get_order(size));
}
static int nl_portid_hash_rehash(struct nl_portid_hash *hash, int grow)
{
	unsigned int omask, mask, shift;
	size_t osize, size;
	struct hlist_head *otable, *table;
	int i;

	omask = mask = hash->mask;
	osize = size = (mask + 1) * sizeof(*table);
	shift = hash->shift;

	if (grow) {
		if (++shift > hash->max_shift)
			return 0;
		mask = mask * 2 + 1;
		size *= 2;
	}

	table = nl_portid_hash_zalloc(size);
	if (!table)
		return 0;

	otable = hash->table;
	hash->table = table;
	hash->mask = mask;
	hash->shift = shift;
	get_random_bytes(&hash->rnd, sizeof(hash->rnd));

	for (i = 0; i <= omask; i++) {
		struct sock *sk;
		struct hlist_node *tmp;

		sk_for_each_safe(sk, tmp, &otable[i])
			__sk_add_node(sk, nl_portid_hashfn(hash, nlk_sk(sk)->portid));
	}

	nl_portid_hash_free(otable, osize);
	hash->rehash_time = jiffies + 10 * 60 * HZ;
	return 1;
}
static inline int nl_portid_hash_dilute(struct nl_portid_hash *hash, int len)
{
	int avg = hash->entries >> hash->shift;

	if (unlikely(avg > 1) && nl_portid_hash_rehash(hash, 1))
		return 1;

	if (unlikely(len > avg) && time_after(jiffies, hash->rehash_time)) {
		nl_portid_hash_rehash(hash, 0);
		return 1;
	}

	return 0;
}
static const struct proto_ops netlink_ops;
static void
netlink_update_listeners(struct sock *sk)
{
	struct netlink_table *tbl = &nl_table[sk->sk_protocol];
	unsigned long mask;
	unsigned int i;
	struct listeners *listeners;

	listeners = nl_deref_protected(tbl->listeners);
	if (!listeners)
		return;

	for (i = 0; i < NLGRPLONGS(tbl->groups); i++) {
		mask = 0;
		sk_for_each_bound(sk, &tbl->mc_list) {
			if (i < NLGRPLONGS(nlk_sk(sk)->ngroups))
				mask |= nlk_sk(sk)->groups[i];
		}
		listeners->masks[i] = mask;
	}
	/* this function is only called with the netlink table "grabbed", which
	 * makes sure updates are visible before bind or setsockopt return. */
}
static int netlink_insert(struct sock *sk, struct net *net, u32 portid)
{
	struct nl_portid_hash *hash = &nl_table[sk->sk_protocol].hash;
	struct hlist_head *head;
	int err = -EADDRINUSE;
	struct sock *osk;
	int len;

	netlink_table_grab();
	head = nl_portid_hashfn(hash, portid);
	len = 0;
	sk_for_each(osk, head) {
		if (net_eq(sock_net(osk), net) && (nlk_sk(osk)->portid == portid))
			break;
		len++;
	}
	if (osk)
		goto err;

	err = -EBUSY;
	if (nlk_sk(sk)->portid)
		goto err;

	err = -ENOMEM;
	if (BITS_PER_LONG > 32 && unlikely(hash->entries >= UINT_MAX))
		goto err;

	if (len && nl_portid_hash_dilute(hash, len))
		head = nl_portid_hashfn(hash, portid);
	hash->entries++;
	nlk_sk(sk)->portid = portid;
	sk_add_node(sk, head);
	err = 0;

err:
	netlink_table_ungrab();
	return err;
}
static void netlink_remove(struct sock *sk)
{
	netlink_table_grab();
	if (sk_del_node_init(sk))
		nl_table[sk->sk_protocol].hash.entries--;
	if (nlk_sk(sk)->subscriptions)
		__sk_del_bind_node(sk);
	netlink_table_ungrab();
}
static struct proto netlink_proto = {
	.name	  = "NETLINK",
	.owner	  = THIS_MODULE,
	.obj_size = sizeof(struct netlink_sock),
};
static int __netlink_create(struct net *net, struct socket *sock,
			    struct mutex *cb_mutex, int protocol)
{
	struct sock *sk;
	struct netlink_sock *nlk;

	sock->ops = &netlink_ops;

	sk = sk_alloc(net, PF_NETLINK, GFP_KERNEL, &netlink_proto);
	if (!sk)
		return -ENOMEM;

	sock_init_data(sock, sk);

	nlk = nlk_sk(sk);
	if (cb_mutex) {
		nlk->cb_mutex = cb_mutex;
	} else {
		nlk->cb_mutex = &nlk->cb_def_mutex;
		mutex_init(nlk->cb_mutex);
	}
	init_waitqueue_head(&nlk->wait);
#ifdef CONFIG_NETLINK_MMAP
	mutex_init(&nlk->pg_vec_lock);
#endif

	sk->sk_destruct = netlink_sock_destruct;
	sk->sk_protocol = protocol;
	return 0;
}
static int netlink_create(struct net *net, struct socket *sock, int protocol,
			  int kern)
{
	struct module *module = NULL;
	struct mutex *cb_mutex;
	struct netlink_sock *nlk;
	void (*bind)(int group);
	int err = 0;

	sock->state = SS_UNCONNECTED;

	if (sock->type != SOCK_RAW && sock->type != SOCK_DGRAM)
		return -ESOCKTNOSUPPORT;

	if (protocol < 0 || protocol >= MAX_LINKS)
		return -EPROTONOSUPPORT;

	netlink_lock_table();
#ifdef CONFIG_MODULES
	if (!nl_table[protocol].registered) {
		netlink_unlock_table();
		request_module("net-pf-%d-proto-%d", PF_NETLINK, protocol);
		netlink_lock_table();
	}
#endif
	if (nl_table[protocol].registered &&
	    try_module_get(nl_table[protocol].module))
		module = nl_table[protocol].module;
	else
		err = -EPROTONOSUPPORT;
	cb_mutex = nl_table[protocol].cb_mutex;
	bind = nl_table[protocol].bind;
	netlink_unlock_table();

	if (err < 0)
		goto out;

	err = __netlink_create(net, sock, cb_mutex, protocol);
	if (err < 0)
		goto out_module;

	local_bh_disable();
	sock_prot_inuse_add(net, &netlink_proto, 1);
	local_bh_enable();

	nlk = nlk_sk(sock->sk);
	nlk->module = module;
	nlk->netlink_bind = bind;
out:
	return err;

out_module:
	module_put(module);
	goto out;
}
static int netlink_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct netlink_sock *nlk;

	if (!sk)
		return 0;

	netlink_remove(sk);
	sock_orphan(sk);
	nlk = nlk_sk(sk);

	/*
	 * OK. Socket is unlinked, any packets that arrive now
	 * will be purged.
	 */

	sock->sk = NULL;
	wake_up_interruptible_all(&nlk->wait);

	skb_queue_purge(&sk->sk_write_queue);

	if (nlk->portid) {
		struct netlink_notify n = {
						.net = sock_net(sk),
						.protocol = sk->sk_protocol,
						.portid = nlk->portid,
					  };
		atomic_notifier_call_chain(&netlink_chain,
				NETLINK_URELEASE, &n);
	}

	module_put(nlk->module);

	netlink_table_grab();
	if (netlink_is_kernel(sk)) {
		BUG_ON(nl_table[sk->sk_protocol].registered == 0);
		if (--nl_table[sk->sk_protocol].registered == 0) {
			struct listeners *old;

			old = nl_deref_protected(nl_table[sk->sk_protocol].listeners);
			RCU_INIT_POINTER(nl_table[sk->sk_protocol].listeners, NULL);
			kfree_rcu(old, rcu);
			nl_table[sk->sk_protocol].module = NULL;
			nl_table[sk->sk_protocol].bind = NULL;
			nl_table[sk->sk_protocol].flags = 0;
			nl_table[sk->sk_protocol].registered = 0;
		}
	} else if (nlk->subscriptions) {
		netlink_update_listeners(sk);
	}
	netlink_table_ungrab();

	kfree(nlk->groups);
	nlk->groups = NULL;

	local_bh_disable();
	sock_prot_inuse_add(sock_net(sk), &netlink_proto, -1);
	local_bh_enable();
	sock_put(sk);
	return 0;
}
static int netlink_autobind(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct net *net = sock_net(sk);
	struct nl_portid_hash *hash = &nl_table[sk->sk_protocol].hash;
	struct hlist_head *head;
	struct sock *osk;
	s32 portid = task_tgid_vnr(current);
	int err;
	static s32 rover = -4097;

retry:
	cond_resched();
	netlink_table_grab();
	head = nl_portid_hashfn(hash, portid);
	sk_for_each(osk, head) {
		if (!net_eq(sock_net(osk), net))
			continue;
		if (nlk_sk(osk)->portid == portid) {
			/* Bind collision, search negative portid values. */
			portid = rover--;
			if (rover > -4097)
				rover = -4097;
			netlink_table_ungrab();
			goto retry;
		}
	}
	netlink_table_ungrab();

	err = netlink_insert(sk, net, portid);
	if (err == -EADDRINUSE)
		goto retry;

	/* If 2 threads race to autobind, that is fine.  */
	if (err == -EBUSY)
		err = 0;

	return err;
}
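/*
 * Userspace rarely triggers autobinding explicitly: binding with
 * nl_pid == 0, or connecting/sending on an unbound socket, ends up here and
 * the socket gets the caller's tgid, or a negative rover value after a
 * collision. Sketch (the kernel-chosen id can be read back afterwards with
 * getsockname()):
 *
 *	struct sockaddr_nl sa = { .nl_family = AF_NETLINK };
 *
 *	bind(fd, (struct sockaddr *)&sa, sizeof(sa));
 */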
static inline int netlink_capable(const struct socket *sock, unsigned int flag)
{
	return (nl_table[sock->sk->sk_protocol].flags & flag) ||
		ns_capable(sock_net(sock->sk)->user_ns, CAP_NET_ADMIN);
}
static void
netlink_update_subscriptions(struct sock *sk, unsigned int subscriptions)
{
	struct netlink_sock *nlk = nlk_sk(sk);

	if (nlk->subscriptions && !subscriptions)
		__sk_del_bind_node(sk);
	else if (!nlk->subscriptions && subscriptions)
		sk_add_bind_node(sk, &nl_table[sk->sk_protocol].mc_list);
	nlk->subscriptions = subscriptions;
}
static int netlink_realloc_groups(struct sock *sk)
{
	struct netlink_sock *nlk = nlk_sk(sk);
	unsigned int groups;
	unsigned long *new_groups;
	int err = 0;

	netlink_table_grab();

	groups = nl_table[sk->sk_protocol].groups;
	if (!nl_table[sk->sk_protocol].registered) {
		err = -ENOENT;
		goto out_unlock;
	}

	if (nlk->ngroups >= groups)
		goto out_unlock;

	new_groups = krealloc(nlk->groups, NLGRPSZ(groups), GFP_ATOMIC);
	if (new_groups == NULL) {
		err = -ENOMEM;
		goto out_unlock;
	}
	memset((char *)new_groups + NLGRPSZ(nlk->ngroups), 0,
	       NLGRPSZ(groups) - NLGRPSZ(nlk->ngroups));

	nlk->groups = new_groups;
	nlk->ngroups = groups;
out_unlock:
	netlink_table_ungrab();
	return err;
}
static int netlink_bind(struct socket *sock, struct sockaddr *addr,
			int addr_len)
{
	struct sock *sk = sock->sk;
	struct net *net = sock_net(sk);
	struct netlink_sock *nlk = nlk_sk(sk);
	struct sockaddr_nl *nladdr = (struct sockaddr_nl *)addr;
	int err;

	if (addr_len < sizeof(struct sockaddr_nl))
		return -EINVAL;

	if (nladdr->nl_family != AF_NETLINK)
		return -EINVAL;

	/* Only superuser is allowed to listen multicasts */
	if (nladdr->nl_groups) {
		if (!netlink_capable(sock, NL_CFG_F_NONROOT_RECV))
			return -EPERM;
		err = netlink_realloc_groups(sk);
		if (err)
			return err;
	}

	if (nlk->portid) {
		if (nladdr->nl_pid != nlk->portid)
			return -EINVAL;
	} else {
		err = nladdr->nl_pid ?
			netlink_insert(sk, net, nladdr->nl_pid) :
			netlink_autobind(sock);
		if (err)
			return err;
	}

	if (!nladdr->nl_groups && (nlk->groups == NULL || !(u32)nlk->groups[0]))
		return 0;

	netlink_table_grab();
	netlink_update_subscriptions(sk, nlk->subscriptions +
					 hweight32(nladdr->nl_groups) -
					 hweight32(nlk->groups[0]));
	nlk->groups[0] = (nlk->groups[0] & ~0xffffffffUL) | nladdr->nl_groups;
	netlink_update_listeners(sk);
	netlink_table_ungrab();

	if (nlk->netlink_bind && nlk->groups[0]) {
		int i;

		for (i = 0; i < nlk->ngroups; i++) {
			if (test_bit(i, nlk->groups))
				nlk->netlink_bind(i);
		}
	}

	return 0;
}
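/*
 * Userspace sketch of both subscription paths handled above; the group
 * numbers are protocol-specific, RTNLGRP_LINK and RTNLGRP_IPV6_ROUTE are
 * only examples. Groups 1-32 fit in the nl_groups bitmask:
 *
 *	struct sockaddr_nl sa = {
 *		.nl_family = AF_NETLINK,
 *		.nl_groups = 1 << (RTNLGRP_LINK - 1),
 *	};
 *	bind(fd, (struct sockaddr *)&sa, sizeof(sa));
 *
 * while NETLINK_ADD_MEMBERSHIP takes the group number itself and also works
 * for groups beyond the first 32:
 *
 *	unsigned int group = RTNLGRP_IPV6_ROUTE;
 *
 *	setsockopt(fd, SOL_NETLINK, NETLINK_ADD_MEMBERSHIP,
 *		   &group, sizeof(group));
 */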
static int netlink_connect(struct socket *sock, struct sockaddr *addr,
			   int alen, int flags)
{
	int err = 0;
	struct sock *sk = sock->sk;
	struct netlink_sock *nlk = nlk_sk(sk);
	struct sockaddr_nl *nladdr = (struct sockaddr_nl *)addr;

	if (alen < sizeof(addr->sa_family))
		return -EINVAL;

	if (addr->sa_family == AF_UNSPEC) {
		sk->sk_state	= NETLINK_UNCONNECTED;
		nlk->dst_portid	= 0;
		nlk->dst_group	= 0;
		return 0;
	}
	if (addr->sa_family != AF_NETLINK)
		return -EINVAL;

	/* Only superuser is allowed to send multicasts */
	if (nladdr->nl_groups && !netlink_capable(sock, NL_CFG_F_NONROOT_SEND))
		return -EPERM;

	if (!nlk->portid)
		err = netlink_autobind(sock);

	if (err == 0) {
		sk->sk_state	= NETLINK_CONNECTED;
		nlk->dst_portid = nladdr->nl_pid;
		nlk->dst_group	= ffs(nladdr->nl_groups);
	}

	return err;
}
static int netlink_getname(struct socket *sock, struct sockaddr *addr,
			   int *addr_len, int peer)
{
	struct sock *sk = sock->sk;
	struct netlink_sock *nlk = nlk_sk(sk);
	DECLARE_SOCKADDR(struct sockaddr_nl *, nladdr, addr);

	nladdr->nl_family = AF_NETLINK;
	nladdr->nl_pad = 0;
	*addr_len = sizeof(*nladdr);

	if (peer) {
		nladdr->nl_pid = nlk->dst_portid;
		nladdr->nl_groups = netlink_group_mask(nlk->dst_group);
	} else {
		nladdr->nl_pid = nlk->portid;
		nladdr->nl_groups = nlk->groups ? nlk->groups[0] : 0;
	}
	return 0;
}
static void netlink_overrun(struct sock *sk)
{
	struct netlink_sock *nlk = nlk_sk(sk);

	if (!(nlk->flags & NETLINK_RECV_NO_ENOBUFS)) {
		if (!test_and_set_bit(NETLINK_CONGESTED, &nlk_sk(sk)->state)) {
			sk->sk_err = ENOBUFS;
			sk->sk_error_report(sk);
		}
	}
	atomic_inc(&sk->sk_drops);
}
static struct sock *netlink_getsockbyportid(struct sock *ssk, u32 portid)
{
	struct sock *sock;
	struct netlink_sock *nlk;

	sock = netlink_lookup(sock_net(ssk), ssk->sk_protocol, portid);
	if (!sock)
		return ERR_PTR(-ECONNREFUSED);

	/* Don't bother queuing skb if kernel socket has no input function */
	nlk = nlk_sk(sock);
	if (sock->sk_state == NETLINK_CONNECTED &&
	    nlk->dst_portid != nlk_sk(ssk)->portid) {
		sock_put(sock);
		return ERR_PTR(-ECONNREFUSED);
	}
	return sock;
}
struct sock *netlink_getsockbyfilp(struct file *filp)
{
	struct inode *inode = file_inode(filp);
	struct sock *sock;

	if (!S_ISSOCK(inode->i_mode))
		return ERR_PTR(-ENOTSOCK);

	sock = SOCKET_I(inode)->sk;
	if (sock->sk_family != AF_NETLINK)
		return ERR_PTR(-EINVAL);

	sock_hold(sock);
	return sock;
}
/*
 * Attach a skb to a netlink socket.
 * The caller must hold a reference to the destination socket. On error, the
 * reference is dropped. The skb is not sent to the destination; all error
 * checks are performed and memory in the queue is reserved.
 * Return values:
 * < 0: error. skb freed, reference to sock dropped.
 * 0: continue
 * 1: repeat lookup - reference dropped while waiting for socket memory.
 */
int netlink_attachskb(struct sock *sk, struct sk_buff *skb,
		      long *timeo, struct sock *ssk)
{
	struct netlink_sock *nlk;

	nlk = nlk_sk(sk);

	if (atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf ||
	    test_bit(NETLINK_CONGESTED, &nlk->state)) {
		DECLARE_WAITQUEUE(wait, current);
		if (!*timeo) {
			if (!ssk || netlink_is_kernel(ssk))
				netlink_overrun(sk);
			sock_put(sk);
			kfree_skb(skb);
			return -EAGAIN;
		}

		__set_current_state(TASK_INTERRUPTIBLE);
		add_wait_queue(&nlk->wait, &wait);

		if ((atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf ||
		     test_bit(NETLINK_CONGESTED, &nlk->state)) &&
		    !sock_flag(sk, SOCK_DEAD))
			*timeo = schedule_timeout(*timeo);

		__set_current_state(TASK_RUNNING);
		remove_wait_queue(&nlk->wait, &wait);
		sock_put(sk);

		if (signal_pending(current)) {
			kfree_skb(skb);
			return sock_intr_errno(*timeo);
		}
		return 1;
	}
	netlink_skb_set_owner_r(skb, sk);
	return 0;
}
static int __netlink_sendskb(struct sock *sk, struct sk_buff *skb)
{
	int len = skb->len;

	skb_queue_tail(&sk->sk_receive_queue, skb);
	sk->sk_data_ready(sk, len);
	return len;
}
int netlink_sendskb(struct sock *sk, struct sk_buff *skb)
{
	int len = __netlink_sendskb(sk, skb);

	sock_put(sk);
	return len;
}
void netlink_detachskb(struct sock *sk, struct sk_buff *skb)
{
	kfree_skb(skb);
	sock_put(sk);
}
static struct sk_buff *netlink_trim(struct sk_buff *skb, gfp_t allocation)
{
	int delta;

	WARN_ON(skb->sk != NULL);

	delta = skb->end - skb->tail;
	if (delta * 2 < skb->truesize)
		return skb;

	if (skb_shared(skb)) {
		struct sk_buff *nskb = skb_clone(skb, allocation);
		if (!nskb)
			return skb;
		consume_skb(skb);
		skb = nskb;
	}

	if (!pskb_expand_head(skb, 0, -delta, allocation))
		skb->truesize -= delta;

	return skb;
}
static void netlink_rcv_wake(struct sock *sk)
{
	struct netlink_sock *nlk = nlk_sk(sk);

	if (skb_queue_empty(&sk->sk_receive_queue))
		clear_bit(NETLINK_CONGESTED, &nlk->state);
	if (!test_bit(NETLINK_CONGESTED, &nlk->state))
		wake_up_interruptible(&nlk->wait);
}
static int netlink_unicast_kernel(struct sock *sk, struct sk_buff *skb,
				  struct sock *ssk)
{
	int ret;
	struct netlink_sock *nlk = nlk_sk(sk);

	ret = -ECONNREFUSED;
	if (nlk->netlink_rcv != NULL) {
		ret = skb->len;
		netlink_skb_set_owner_r(skb, sk);
		NETLINK_CB(skb).sk = ssk;
		nlk->netlink_rcv(skb);
		consume_skb(skb);
	} else {
		kfree_skb(skb);
	}
	sock_put(sk);
	return ret;
}
int netlink_unicast(struct sock *ssk, struct sk_buff *skb,
		    u32 portid, int nonblock)
{
	struct sock *sk;
	int err;
	long timeo;

	skb = netlink_trim(skb, gfp_any());

	timeo = sock_sndtimeo(ssk, nonblock);
retry:
	sk = netlink_getsockbyportid(ssk, portid);
	if (IS_ERR(sk)) {
		kfree_skb(skb);
		return PTR_ERR(sk);
	}
	if (netlink_is_kernel(sk))
		return netlink_unicast_kernel(sk, skb, ssk);

	if (sk_filter(sk, skb)) {
		err = skb->len;
		kfree_skb(skb);
		sock_put(sk);
		return err;
	}

	err = netlink_attachskb(sk, skb, &timeo, ssk);
	if (err == 1)
		goto retry;
	if (err)
		return err;

	return netlink_sendskb(sk, skb);
}
EXPORT_SYMBOL(netlink_unicast);
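/*
 * Kernel-side usage sketch; MY_MSG_TYPE, payload and my_kernel_sk are
 * illustrative names, not defined in this file:
 *
 *	struct sk_buff *skb = nlmsg_new(payload_size, GFP_KERNEL);
 *	struct nlmsghdr *nlh;
 *
 *	if (skb == NULL)
 *		return -ENOMEM;
 *	nlh = nlmsg_put(skb, 0, seq, MY_MSG_TYPE, payload_size, 0);
 *	memcpy(nlmsg_data(nlh), payload, payload_size);
 *	return netlink_unicast(my_kernel_sk, skb, dst_portid, MSG_DONTWAIT);
 *
 * With a nonblocking send, queue pressure on the receiver yields -EAGAIN
 * instead of sleeping in netlink_attachskb().
 */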
int netlink_has_listeners(struct sock *sk, unsigned int group)
{
	int res = 0;
	struct listeners *listeners;

	BUG_ON(!netlink_is_kernel(sk));

	rcu_read_lock();
	listeners = rcu_dereference(nl_table[sk->sk_protocol].listeners);

	if (listeners && group - 1 < nl_table[sk->sk_protocol].groups)
		res = test_bit(group - 1, listeners->masks);

	rcu_read_unlock();

	return res;
}
EXPORT_SYMBOL_GPL(netlink_has_listeners);
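/*
 * Typical pattern: skip building an expensive notification when nobody is
 * subscribed. Sketch; MY_GRP and build_notification() are placeholders:
 *
 *	if (!netlink_has_listeners(my_kernel_sk, MY_GRP))
 *		return 0;
 *	skb = build_notification();
 *	netlink_broadcast(my_kernel_sk, skb, 0, MY_GRP, GFP_KERNEL);
 */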
static int netlink_broadcast_deliver(struct sock *sk, struct sk_buff *skb)
{
	struct netlink_sock *nlk = nlk_sk(sk);

	if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf &&
	    !test_bit(NETLINK_CONGESTED, &nlk->state)) {
		netlink_skb_set_owner_r(skb, sk);
		__netlink_sendskb(sk, skb);
		return atomic_read(&sk->sk_rmem_alloc) > (sk->sk_rcvbuf >> 1);
	}
	return -1;
}
struct netlink_broadcast_data {
	struct sock *exclude_sk;
	struct net *net;
	u32 portid;
	u32 group;
	int failure;
	int delivery_failure;
	int congested;
	int delivered;
	gfp_t allocation;
	struct sk_buff *skb, *skb2;
	int (*tx_filter)(struct sock *dsk, struct sk_buff *skb, void *data);
	void *tx_data;
};
static int do_one_broadcast(struct sock *sk,
			    struct netlink_broadcast_data *p)
{
	struct netlink_sock *nlk = nlk_sk(sk);
	int val;

	if (p->exclude_sk == sk)
		goto out;

	if (nlk->portid == p->portid || p->group - 1 >= nlk->ngroups ||
	    !test_bit(p->group - 1, nlk->groups))
		goto out;

	if (!net_eq(sock_net(sk), p->net))
		goto out;

	if (p->failure) {
		netlink_overrun(sk);
		goto out;
	}

	sock_hold(sk);
	if (p->skb2 == NULL) {
		if (skb_shared(p->skb)) {
			p->skb2 = skb_clone(p->skb, p->allocation);
		} else {
			p->skb2 = skb_get(p->skb);
			/*
			 * skb ownership may have been set when
			 * delivered to a previous socket.
			 */
			skb_orphan(p->skb2);
		}
	}
	if (p->skb2 == NULL) {
		netlink_overrun(sk);
		/* Clone failed. Notify ALL listeners. */
		p->failure = 1;
		if (nlk->flags & NETLINK_BROADCAST_SEND_ERROR)
			p->delivery_failure = 1;
	} else if (p->tx_filter && p->tx_filter(sk, p->skb2, p->tx_data)) {
		kfree_skb(p->skb2);
		p->skb2 = NULL;
	} else if (sk_filter(sk, p->skb2)) {
		kfree_skb(p->skb2);
		p->skb2 = NULL;
	} else if ((val = netlink_broadcast_deliver(sk, p->skb2)) < 0) {
		netlink_overrun(sk);
		if (nlk->flags & NETLINK_BROADCAST_SEND_ERROR)
			p->delivery_failure = 1;
	} else {
		p->congested |= val;
		p->delivered = 1;
		p->skb2 = NULL;
	}
	sock_put(sk);

out:
	return 0;
}
int netlink_broadcast_filtered(struct sock *ssk, struct sk_buff *skb, u32 portid,
	u32 group, gfp_t allocation,
	int (*filter)(struct sock *dsk, struct sk_buff *skb, void *data),
	void *filter_data)
{
	struct net *net = sock_net(ssk);
	struct netlink_broadcast_data info;
	struct sock *sk;

	skb = netlink_trim(skb, allocation);

	info.exclude_sk = ssk;
	info.net = net;
	info.portid = portid;
	info.group = group;
	info.failure = 0;
	info.delivery_failure = 0;
	info.congested = 0;
	info.delivered = 0;
	info.allocation = allocation;
	info.skb = skb;
	info.skb2 = NULL;
	info.tx_filter = filter;
	info.tx_data = filter_data;

	/* While we sleep in clone, do not allow to change socket list */

	netlink_lock_table();

	sk_for_each_bound(sk, &nl_table[ssk->sk_protocol].mc_list)
		do_one_broadcast(sk, &info);

	consume_skb(skb);

	netlink_unlock_table();

	if (info.delivery_failure) {
		kfree_skb(info.skb2);
		return -ENOBUFS;
	}
	consume_skb(info.skb2);

	if (info.delivered) {
		if (info.congested && (allocation & __GFP_WAIT))
			yield();
		return 0;
	}
	return -ESRCH;
}
EXPORT_SYMBOL(netlink_broadcast_filtered);
int netlink_broadcast(struct sock *ssk, struct sk_buff *skb, u32 portid,
		      u32 group, gfp_t allocation)
{
	return netlink_broadcast_filtered(ssk, skb, portid, group, allocation,
		NULL, NULL);
}
EXPORT_SYMBOL(netlink_broadcast);
struct netlink_set_err_data {
	struct sock *exclude_sk;
	u32 portid;
	u32 group;
	int code;
};
static int do_one_set_err(struct sock *sk, struct netlink_set_err_data *p)
{
	struct netlink_sock *nlk = nlk_sk(sk);
	int ret = 0;

	if (sk == p->exclude_sk)
		goto out;

	if (!net_eq(sock_net(sk), sock_net(p->exclude_sk)))
		goto out;

	if (nlk->portid == p->portid || p->group - 1 >= nlk->ngroups ||
	    !test_bit(p->group - 1, nlk->groups))
		goto out;

	if (p->code == ENOBUFS && nlk->flags & NETLINK_RECV_NO_ENOBUFS) {
		ret = 1;
		goto out;
	}

	sk->sk_err = p->code;
	sk->sk_error_report(sk);
out:
	return ret;
}
/**
 * netlink_set_err - report error to broadcast listeners
 * @ssk: the kernel netlink socket, as returned by netlink_kernel_create()
 * @portid: the PORTID of a process that we want to skip (if any)
 * @group: the broadcast group that will notice the error
 * @code: error code, must be negative (as usual in kernelspace)
 *
 * This function returns the number of broadcast listeners that have set the
 * NETLINK_RECV_NO_ENOBUFS socket option.
 */
int netlink_set_err(struct sock *ssk, u32 portid, u32 group, int code)
{
	struct netlink_set_err_data info;
	struct sock *sk;
	int ret = 0;

	info.exclude_sk = ssk;
	info.portid = portid;
	info.group = group;
	/* sk->sk_err wants a positive error value */
	info.code = -code;

	read_lock(&nl_table_lock);

	sk_for_each_bound(sk, &nl_table[ssk->sk_protocol].mc_list)
		ret += do_one_set_err(sk, &info);

	read_unlock(&nl_table_lock);
	return ret;
}
EXPORT_SYMBOL(netlink_set_err);
/* must be called with netlink table grabbed */
static void netlink_update_socket_mc(struct netlink_sock *nlk,
				     unsigned int group,
				     int is_new)
{
	int old, new = !!is_new, subscriptions;

	old = test_bit(group - 1, nlk->groups);
	subscriptions = nlk->subscriptions - old + new;
	if (new)
		__set_bit(group - 1, nlk->groups);
	else
		__clear_bit(group - 1, nlk->groups);
	netlink_update_subscriptions(&nlk->sk, subscriptions);
	netlink_update_listeners(&nlk->sk);
}
static int netlink_setsockopt(struct socket *sock, int level, int optname,
			      char __user *optval, unsigned int optlen)
{
	struct sock *sk = sock->sk;
	struct netlink_sock *nlk = nlk_sk(sk);
	unsigned int val = 0;
	int err;

	if (level != SOL_NETLINK)
		return -ENOPROTOOPT;

	if (optname != NETLINK_RX_RING && optname != NETLINK_TX_RING &&
	    optlen >= sizeof(int) &&
	    get_user(val, (unsigned int __user *)optval))
		return -EFAULT;

	switch (optname) {
	case NETLINK_PKTINFO:
		if (val)
			nlk->flags |= NETLINK_RECV_PKTINFO;
		else
			nlk->flags &= ~NETLINK_RECV_PKTINFO;
		err = 0;
		break;
	case NETLINK_ADD_MEMBERSHIP:
	case NETLINK_DROP_MEMBERSHIP: {
		if (!netlink_capable(sock, NL_CFG_F_NONROOT_RECV))
			return -EPERM;
		err = netlink_realloc_groups(sk);
		if (err)
			return err;
		if (!val || val - 1 >= nlk->ngroups)
			return -EINVAL;
		netlink_table_grab();
		netlink_update_socket_mc(nlk, val,
					 optname == NETLINK_ADD_MEMBERSHIP);
		netlink_table_ungrab();

		if (nlk->netlink_bind)
			nlk->netlink_bind(val);

		err = 0;
		break;
	}
	case NETLINK_BROADCAST_ERROR:
		if (val)
			nlk->flags |= NETLINK_BROADCAST_SEND_ERROR;
		else
			nlk->flags &= ~NETLINK_BROADCAST_SEND_ERROR;
		err = 0;
		break;
	case NETLINK_NO_ENOBUFS:
		if (val) {
			nlk->flags |= NETLINK_RECV_NO_ENOBUFS;
			clear_bit(NETLINK_CONGESTED, &nlk->state);
			wake_up_interruptible(&nlk->wait);
		} else {
			nlk->flags &= ~NETLINK_RECV_NO_ENOBUFS;
		}
		err = 0;
		break;
#ifdef CONFIG_NETLINK_MMAP
	case NETLINK_RX_RING:
	case NETLINK_TX_RING: {
		struct nl_mmap_req req;

		/* Rings might consume more memory than queue limits, require
		 * CAP_NET_ADMIN.
		 */
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		if (optlen < sizeof(req))
			return -EINVAL;
		if (copy_from_user(&req, optval, sizeof(req)))
			return -EFAULT;
		err = netlink_set_ring(sk, &req, false,
				       optname == NETLINK_TX_RING);
		break;
	}
#endif /* CONFIG_NETLINK_MMAP */
	default:
		err = -ENOPROTOOPT;
	}
	return err;
}
static int netlink_getsockopt(struct socket *sock, int level, int optname,
			      char __user *optval, int __user *optlen)
{
	struct sock *sk = sock->sk;
	struct netlink_sock *nlk = nlk_sk(sk);
	int len, val, err;

	if (level != SOL_NETLINK)
		return -ENOPROTOOPT;

	if (get_user(len, optlen))
		return -EFAULT;
	if (len < 0)
		return -EINVAL;

	switch (optname) {
	case NETLINK_PKTINFO:
		if (len < sizeof(int))
			return -EINVAL;
		len = sizeof(int);
		val = nlk->flags & NETLINK_RECV_PKTINFO ? 1 : 0;
		if (put_user(len, optlen) ||
		    put_user(val, optval))
			return -EFAULT;
		err = 0;
		break;
	case NETLINK_BROADCAST_ERROR:
		if (len < sizeof(int))
			return -EINVAL;
		len = sizeof(int);
		val = nlk->flags & NETLINK_BROADCAST_SEND_ERROR ? 1 : 0;
		if (put_user(len, optlen) ||
		    put_user(val, optval))
			return -EFAULT;
		err = 0;
		break;
	case NETLINK_NO_ENOBUFS:
		if (len < sizeof(int))
			return -EINVAL;
		len = sizeof(int);
		val = nlk->flags & NETLINK_RECV_NO_ENOBUFS ? 1 : 0;
		if (put_user(len, optlen) ||
		    put_user(val, optval))
			return -EFAULT;
		err = 0;
		break;
	default:
		err = -ENOPROTOOPT;
	}
	return err;
}
static void netlink_cmsg_recv_pktinfo(struct msghdr *msg, struct sk_buff *skb)
{
	struct nl_pktinfo info;

	info.group = NETLINK_CB(skb).dst_group;
	put_cmsg(msg, SOL_NETLINK, NETLINK_PKTINFO, sizeof(info), &info);
}
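/*
 * Receive-side sketch for the control message emitted above, assuming the
 * socket has enabled NETLINK_PKTINFO:
 *
 *	char buf[4096];
 *	struct iovec iov = { .iov_base = buf, .iov_len = sizeof(buf) };
 *	char cbuf[CMSG_SPACE(sizeof(struct nl_pktinfo))];
 *	struct msghdr msg = {
 *		.msg_iov = &iov, .msg_iovlen = 1,
 *		.msg_control = cbuf, .msg_controllen = sizeof(cbuf),
 *	};
 *	struct cmsghdr *cmsg;
 *
 *	recvmsg(fd, &msg, 0);
 *	for (cmsg = CMSG_FIRSTHDR(&msg); cmsg; cmsg = CMSG_NXTHDR(&msg, cmsg))
 *		if (cmsg->cmsg_level == SOL_NETLINK &&
 *		    cmsg->cmsg_type == NETLINK_PKTINFO) {
 *			struct nl_pktinfo *pi = (void *)CMSG_DATA(cmsg);
 *			// pi->group is the 1-based delivering group
 *		}
 */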
static int netlink_sendmsg(struct kiocb *kiocb, struct socket *sock,
			   struct msghdr *msg, size_t len)
{
	struct sock_iocb *siocb = kiocb_to_siocb(kiocb);
	struct sock *sk = sock->sk;
	struct netlink_sock *nlk = nlk_sk(sk);
	struct sockaddr_nl *addr = msg->msg_name;
	u32 dst_portid;
	u32 dst_group;
	struct sk_buff *skb;
	int err;
	struct scm_cookie scm;

	if (msg->msg_flags&MSG_OOB)
		return -EOPNOTSUPP;

	if (NULL == siocb->scm)
		siocb->scm = &scm;

	err = scm_send(sock, msg, siocb->scm, true);
	if (err < 0)
		return err;

	if (msg->msg_namelen) {
		err = -EINVAL;
		if (addr->nl_family != AF_NETLINK)
			goto out;
		dst_portid = addr->nl_pid;
		dst_group = ffs(addr->nl_groups);
		err = -EPERM;
		if ((dst_group || dst_portid) &&
		    !netlink_capable(sock, NL_CFG_F_NONROOT_SEND))
			goto out;
	} else {
		dst_portid = nlk->dst_portid;
		dst_group = nlk->dst_group;
	}

	if (!nlk->portid) {
		err = netlink_autobind(sock);
		if (err)
			goto out;
	}

	err = -EMSGSIZE;
	if (len > sk->sk_sndbuf - 32)
		goto out;
	err = -ENOBUFS;
	skb = alloc_skb(len, GFP_KERNEL);
	if (skb == NULL)
		goto out;

	NETLINK_CB(skb).portid	= nlk->portid;
	NETLINK_CB(skb).dst_group = dst_group;
	NETLINK_CB(skb).creds	= siocb->scm->creds;

	err = -EFAULT;
	if (memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len)) {
		kfree_skb(skb);
		goto out;
	}

	err = security_netlink_send(sk, skb);
	if (err) {
		kfree_skb(skb);
		goto out;
	}

	if (dst_group) {
		atomic_inc(&skb->users);
		netlink_broadcast(sk, skb, dst_portid, dst_group, GFP_KERNEL);
	}
	err = netlink_unicast(sk, skb, dst_portid, msg->msg_flags&MSG_DONTWAIT);

out:
	scm_destroy(siocb->scm);
	return err;
}
static int netlink_recvmsg(struct kiocb *kiocb, struct socket *sock,
			   struct msghdr *msg, size_t len,
			   int flags)
{
	struct sock_iocb *siocb = kiocb_to_siocb(kiocb);
	struct scm_cookie scm;
	struct sock *sk = sock->sk;
	struct netlink_sock *nlk = nlk_sk(sk);
	int noblock = flags&MSG_DONTWAIT;
	size_t copied;
	struct sk_buff *skb, *data_skb;
	int err, ret;

	if (flags&MSG_OOB)
		return -EOPNOTSUPP;

	copied = 0;

	skb = skb_recv_datagram(sk, flags, noblock, &err);
	if (skb == NULL)
		goto out;

	data_skb = skb;

#ifdef CONFIG_COMPAT_NETLINK_MESSAGES
	if (unlikely(skb_shinfo(skb)->frag_list)) {
		/*
		 * If this skb has a frag_list, then here that means that we
		 * will have to use the frag_list skb's data for compat tasks
		 * and the regular skb's data for normal (non-compat) tasks.
		 *
		 * If we need to send the compat skb, assign it to the
		 * 'data_skb' variable so that it will be used below for data
		 * copying. We keep 'skb' for everything else, including
		 * freeing both later.
		 */
		if (flags & MSG_CMSG_COMPAT)
			data_skb = skb_shinfo(skb)->frag_list;
	}
#endif

	msg->msg_namelen = 0;

	copied = data_skb->len;
	if (len < copied) {
		msg->msg_flags |= MSG_TRUNC;
		copied = len;
	}

	skb_reset_transport_header(data_skb);
	err = skb_copy_datagram_iovec(data_skb, 0, msg->msg_iov, copied);

	if (msg->msg_name) {
		struct sockaddr_nl *addr = (struct sockaddr_nl *)msg->msg_name;
		addr->nl_family = AF_NETLINK;
		addr->nl_pad    = 0;
		addr->nl_pid	= NETLINK_CB(skb).portid;
		addr->nl_groups	= netlink_group_mask(NETLINK_CB(skb).dst_group);
		msg->msg_namelen = sizeof(*addr);
	}

	if (nlk->flags & NETLINK_RECV_PKTINFO)
		netlink_cmsg_recv_pktinfo(msg, skb);

	if (NULL == siocb->scm) {
		memset(&scm, 0, sizeof(scm));
		siocb->scm = &scm;
	}
	siocb->scm->creds = *NETLINK_CREDS(skb);
	if (flags & MSG_TRUNC)
		copied = data_skb->len;

	skb_free_datagram(sk, skb);

	if (nlk->cb && atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf / 2) {
		ret = netlink_dump(sk);
		if (ret) {
			sk->sk_err = ret;
			sk->sk_error_report(sk);
		}
	}

	scm_recv(sock, msg, siocb->scm, flags);
out:
	netlink_rcv_wake(sk);
	return err ? : copied;
}
static void netlink_data_ready(struct sock *sk, int len)
{
	BUG();
}

/*
 *	We export these functions to other modules. They provide a
 *	complete set of kernel non-blocking support for message
 *	queueing.
 */
struct sock *
__netlink_kernel_create(struct net *net, int unit, struct module *module,
			struct netlink_kernel_cfg *cfg)
{
	struct socket *sock;
	struct sock *sk;
	struct netlink_sock *nlk;
	struct listeners *listeners = NULL;
	struct mutex *cb_mutex = cfg ? cfg->cb_mutex : NULL;
	unsigned int groups;

	BUG_ON(!nl_table);

	if (unit < 0 || unit >= MAX_LINKS)
		return NULL;

	if (sock_create_lite(PF_NETLINK, SOCK_DGRAM, unit, &sock))
		return NULL;

	/*
	 * We have to just have a reference on the net from sk, but don't
	 * get_net it. Besides, we cannot get and then put the net here.
	 * So we create one inside init_net and then move it to net.
	 */

	if (__netlink_create(&init_net, sock, cb_mutex, unit) < 0)
		goto out_sock_release_nosk;

	sk = sock->sk;
	sk_change_net(sk, net);

	if (!cfg || cfg->groups < 32)
		groups = 32;
	else
		groups = cfg->groups;

	listeners = kzalloc(sizeof(*listeners) + NLGRPSZ(groups), GFP_KERNEL);
	if (!listeners)
		goto out_sock_release;

	sk->sk_data_ready = netlink_data_ready;
	if (cfg && cfg->input)
		nlk_sk(sk)->netlink_rcv = cfg->input;

	if (netlink_insert(sk, net, 0))
		goto out_sock_release;

	nlk = nlk_sk(sk);
	nlk->flags |= NETLINK_KERNEL_SOCKET;

	netlink_table_grab();
	if (!nl_table[unit].registered) {
		nl_table[unit].groups = groups;
		rcu_assign_pointer(nl_table[unit].listeners, listeners);
		nl_table[unit].cb_mutex = cb_mutex;
		nl_table[unit].module = module;
		if (cfg) {
			nl_table[unit].bind = cfg->bind;
			nl_table[unit].flags = cfg->flags;
		}
		nl_table[unit].registered = 1;
	} else {
		kfree(listeners);
		nl_table[unit].registered++;
	}
	netlink_table_ungrab();
	return sk;

out_sock_release:
	kfree(listeners);
	netlink_kernel_release(sk);
	return NULL;

out_sock_release_nosk:
	sock_release(sock);
	return NULL;
}
EXPORT_SYMBOL(__netlink_kernel_create);
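/*
 * A typical in-kernel user goes through the netlink_kernel_create() wrapper,
 * which passes THIS_MODULE for @module. Sketch; NETLINK_TEST, my_input(),
 * my_rcv_msg() and my_sk are illustrative names:
 *
 *	static void my_input(struct sk_buff *skb)
 *	{
 *		netlink_rcv_skb(skb, &my_rcv_msg);
 *	}
 *
 *	struct netlink_kernel_cfg cfg = {
 *		.groups	= 32,
 *		.input	= my_input,
 *	};
 *
 *	my_sk = netlink_kernel_create(&init_net, NETLINK_TEST, &cfg);
 */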
void
netlink_kernel_release(struct sock *sk)
{
	sk_release_kernel(sk);
}
EXPORT_SYMBOL(netlink_kernel_release);
int __netlink_change_ngroups(struct sock *sk, unsigned int groups)
{
	struct listeners *new, *old;
	struct netlink_table *tbl = &nl_table[sk->sk_protocol];

	if (groups < 32)
		groups = 32;

	if (NLGRPSZ(tbl->groups) < NLGRPSZ(groups)) {
		new = kzalloc(sizeof(*new) + NLGRPSZ(groups), GFP_ATOMIC);
		if (!new)
			return -ENOMEM;
		old = nl_deref_protected(tbl->listeners);
		memcpy(new->masks, old->masks, NLGRPSZ(tbl->groups));
		rcu_assign_pointer(tbl->listeners, new);

		kfree_rcu(old, rcu);
	}
	tbl->groups = groups;

	return 0;
}
/**
 * netlink_change_ngroups - change number of multicast groups
 *
 * This changes the number of multicast groups that are available
 * on a certain netlink family. Note that it is not possible to
 * change the number of groups to below 32. Also note that it does
 * not implicitly call netlink_clear_multicast_users() when the
 * number of groups is reduced.
 *
 * @sk: The kernel netlink socket, as returned by netlink_kernel_create().
 * @groups: The new number of groups.
 */
int netlink_change_ngroups(struct sock *sk, unsigned int groups)
{
	int err;

	netlink_table_grab();
	err = __netlink_change_ngroups(sk, groups);
	netlink_table_ungrab();

	return err;
}
void __netlink_clear_multicast_users(struct sock *ksk, unsigned int group)
{
	struct sock *sk;
	struct netlink_table *tbl = &nl_table[ksk->sk_protocol];

	sk_for_each_bound(sk, &tbl->mc_list)
		netlink_update_socket_mc(nlk_sk(sk), group, 0);
}
/**
 * netlink_clear_multicast_users - kick off multicast listeners
 *
 * This function removes all listeners from the given group.
 * @ksk: The kernel netlink socket, as returned by
 *	netlink_kernel_create().
 * @group: The multicast group to clear.
 */
void netlink_clear_multicast_users(struct sock *ksk, unsigned int group)
{
	netlink_table_grab();
	__netlink_clear_multicast_users(ksk, group);
	netlink_table_ungrab();
}
struct nlmsghdr *
__nlmsg_put(struct sk_buff *skb, u32 portid, u32 seq, int type, int len, int flags)
{
	struct nlmsghdr *nlh;
	int size = nlmsg_msg_size(len);

	nlh = (struct nlmsghdr *)skb_put(skb, NLMSG_ALIGN(size));
	nlh->nlmsg_type = type;
	nlh->nlmsg_len = size;
	nlh->nlmsg_flags = flags;
	nlh->nlmsg_pid = portid;
	nlh->nlmsg_seq = seq;
	if (!__builtin_constant_p(size) || NLMSG_ALIGN(size) - size != 0)
		memset(nlmsg_data(nlh) + len, 0, NLMSG_ALIGN(size) - size);
	return nlh;
}
EXPORT_SYMBOL(__nlmsg_put);
/*
 * It looks a bit ugly.
 * It would be better to create a kernel thread.
 */

static int netlink_dump(struct sock *sk)
{
	struct netlink_sock *nlk = nlk_sk(sk);
	struct netlink_callback *cb;
	struct sk_buff *skb = NULL;
	struct nlmsghdr *nlh;
	int len, err = -ENOBUFS;
	int alloc_size;

	mutex_lock(nlk->cb_mutex);

	cb = nlk->cb;
	if (cb == NULL) {
		err = -EINVAL;
		goto errout_skb;
	}

	alloc_size = max_t(int, cb->min_dump_alloc, NLMSG_GOODSIZE);

	skb = sock_rmalloc(sk, alloc_size, 0, GFP_KERNEL);
	if (!skb)
		goto errout_skb;

	len = cb->dump(skb, cb);

	if (len > 0) {
		mutex_unlock(nlk->cb_mutex);

		if (sk_filter(sk, skb))
			kfree_skb(skb);
		else
			__netlink_sendskb(sk, skb);
		return 0;
	}

	nlh = nlmsg_put_answer(skb, cb, NLMSG_DONE, sizeof(len), NLM_F_MULTI);
	if (!nlh)
		goto errout_skb;

	nl_dump_check_consistent(cb, nlh);

	memcpy(nlmsg_data(nlh), &len, sizeof(len));

	if (sk_filter(sk, skb))
		kfree_skb(skb);
	else
		__netlink_sendskb(sk, skb);

	if (cb->done)
		cb->done(cb);
	nlk->cb = NULL;
	mutex_unlock(nlk->cb_mutex);

	module_put(cb->module);
	netlink_consume_callback(cb);
	return 0;

errout_skb:
	mutex_unlock(nlk->cb_mutex);
	kfree_skb(skb);
	return err;
}
int __netlink_dump_start(struct sock *ssk, struct sk_buff *skb,
			 const struct nlmsghdr *nlh,
			 struct netlink_dump_control *control)
{
	struct netlink_callback *cb;
	struct sock *sk;
	struct netlink_sock *nlk;
	int ret;

	cb = kzalloc(sizeof(*cb), GFP_KERNEL);
	if (cb == NULL)
		return -ENOBUFS;

	cb->dump = control->dump;
	cb->done = control->done;
	cb->nlh = nlh;
	cb->data = control->data;
	cb->module = control->module;
	cb->min_dump_alloc = control->min_dump_alloc;
	atomic_inc(&skb->users);
	cb->skb = skb;

	sk = netlink_lookup(sock_net(ssk), ssk->sk_protocol, NETLINK_CB(skb).portid);
	if (sk == NULL) {
		netlink_destroy_callback(cb);
		return -ECONNREFUSED;
	}
	nlk = nlk_sk(sk);

	mutex_lock(nlk->cb_mutex);
	/* A dump is in progress... */
	if (nlk->cb) {
		mutex_unlock(nlk->cb_mutex);
		netlink_destroy_callback(cb);
		ret = -EBUSY;
		goto out;
	}
	/* add reference of module which cb->dump belongs to */
	if (!try_module_get(cb->module)) {
		mutex_unlock(nlk->cb_mutex);
		netlink_destroy_callback(cb);
		ret = -EPROTONOSUPPORT;
		goto out;
	}

	nlk->cb = cb;
	mutex_unlock(nlk->cb_mutex);

	ret = netlink_dump(sk);
out:
	sock_put(sk);

	if (ret)
		return ret;

	/* We successfully started a dump, by returning -EINTR we
	 * signal not to send ACK even if it was requested.
	 */
	return -EINTR;
}
EXPORT_SYMBOL(__netlink_dump_start);
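/*
 * Handlers normally reach this through the netlink_dump_start() wrapper when
 * a request carries NLM_F_DUMP. Sketch; my_dump_fn is illustrative:
 *
 *	if (nlh->nlmsg_flags & NLM_F_DUMP) {
 *		struct netlink_dump_control c = {
 *			.dump = my_dump_fn,
 *			.module = THIS_MODULE,
 *		};
 *		return netlink_dump_start(my_sk, skb, nlh, &c);
 *	}
 *
 * netlink_rcv_skb() translates the -EINTR result into "dump started, do not
 * ACK".
 */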
void netlink_ack(struct sk_buff *in_skb, struct nlmsghdr *nlh, int err)
{
	struct sk_buff *skb;
	struct nlmsghdr *rep;
	struct nlmsgerr *errmsg;
	size_t payload = sizeof(*errmsg);
	struct sock *sk;

	/* error messages get the original request appended */
	if (err)
		payload += nlmsg_len(nlh);

	skb = nlmsg_new(payload, GFP_KERNEL);
	if (!skb) {
		sk = netlink_lookup(sock_net(in_skb->sk),
				    in_skb->sk->sk_protocol,
				    NETLINK_CB(in_skb).portid);
		if (sk) {
			sk->sk_err = ENOBUFS;
			sk->sk_error_report(sk);
			sock_put(sk);
		}
		return;
	}

	rep = __nlmsg_put(skb, NETLINK_CB(in_skb).portid, nlh->nlmsg_seq,
			  NLMSG_ERROR, payload, 0);
	errmsg = nlmsg_data(rep);
	errmsg->error = err;
	memcpy(&errmsg->msg, nlh, err ? nlh->nlmsg_len : sizeof(*nlh));
	netlink_unicast(in_skb->sk, skb, NETLINK_CB(in_skb).portid, MSG_DONTWAIT);
}
EXPORT_SYMBOL(netlink_ack);
int netlink_rcv_skb(struct sk_buff *skb, int (*cb)(struct sk_buff *,
						   struct nlmsghdr *))
{
	struct nlmsghdr *nlh;
	int err;

	while (skb->len >= nlmsg_total_size(0)) {
		int msglen;

		nlh = nlmsg_hdr(skb);
		err = 0;

		if (nlh->nlmsg_len < NLMSG_HDRLEN || skb->len < nlh->nlmsg_len)
			return 0;

		/* Only requests are handled by the kernel */
		if (!(nlh->nlmsg_flags & NLM_F_REQUEST))
			goto ack;

		/* Skip control messages */
		if (nlh->nlmsg_type < NLMSG_MIN_TYPE)
			goto ack;

		err = cb(skb, nlh);
		if (err == -EINTR)
			goto skip;

ack:
		if (nlh->nlmsg_flags & NLM_F_ACK || err)
			netlink_ack(skb, nlh, err);

skip:
		msglen = NLMSG_ALIGN(nlh->nlmsg_len);
		if (msglen > skb->len)
			msglen = skb->len;
		skb_pull(skb, msglen);
	}

	return 0;
}
EXPORT_SYMBOL(netlink_rcv_skb);
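/*
 * The usual shape of a kernel input callback built on netlink_rcv_skb();
 * my_rcv_msg(), MY_MSG_TYPE and handle_my_msg() are illustrative names. The
 * dispatcher returns 0 or a negative errno per message:
 *
 *	static int my_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
 *	{
 *		switch (nlh->nlmsg_type) {
 *		case MY_MSG_TYPE:
 *			return handle_my_msg(skb, nlh);
 *		default:
 *			return -EINVAL;
 *		}
 *	}
 *
 *	static void my_input(struct sk_buff *skb)
 *	{
 *		netlink_rcv_skb(skb, &my_rcv_msg);
 *	}
 */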
/**
 * nlmsg_notify - send a notification netlink message
 * @sk: netlink socket to use
 * @skb: notification message
 * @portid: destination netlink portid for reports or 0
 * @group: destination multicast group or 0
 * @report: 1 to report back, 0 to disable
 * @flags: allocation flags
 */
int nlmsg_notify(struct sock *sk, struct sk_buff *skb, u32 portid,
		 unsigned int group, int report, gfp_t flags)
{
	int err = 0;

	if (group) {
		int exclude_portid = 0;

		if (report) {
			atomic_inc(&skb->users);
			exclude_portid = portid;
		}

		/* errors reported via destination sk->sk_err, but propagate
		 * delivery errors if NETLINK_BROADCAST_ERROR flag is set */
		err = nlmsg_multicast(sk, skb, exclude_portid, group, flags);
	}

	if (report) {
		int err2;

		err2 = nlmsg_unicast(sk, skb, portid);
		if (!err || err == -ESRCH)
			err = err2;
	}

	return err;
}
EXPORT_SYMBOL(nlmsg_notify);
#ifdef CONFIG_PROC_FS
struct nl_seq_iter {
	struct seq_net_private p;
	int link;
	int hash_idx;
};

static struct sock *netlink_seq_socket_idx(struct seq_file *seq, loff_t pos)
{
	struct nl_seq_iter *iter = seq->private;
	int i, j;
	struct sock *s;
	loff_t off = 0;

	for (i = 0; i < MAX_LINKS; i++) {
		struct nl_portid_hash *hash = &nl_table[i].hash;

		for (j = 0; j <= hash->mask; j++) {
			sk_for_each(s, &hash->table[j]) {
				if (sock_net(s) != seq_file_net(seq))
					continue;
				if (off == pos) {
					iter->link = i;
					iter->hash_idx = j;
					return s;
				}
				++off;
			}
		}
	}
	return NULL;
}
static void *netlink_seq_start(struct seq_file *seq, loff_t *pos)
	__acquires(nl_table_lock)
{
	read_lock(&nl_table_lock);
	return *pos ? netlink_seq_socket_idx(seq, *pos - 1) : SEQ_START_TOKEN;
}
static void *netlink_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct sock *s;
	struct nl_seq_iter *iter;
	int i, j;

	++*pos;

	if (v == SEQ_START_TOKEN)
		return netlink_seq_socket_idx(seq, 0);

	iter = seq->private;
	s = v;
	do {
		s = sk_next(s);
	} while (s && sock_net(s) != seq_file_net(seq));
	if (s)
		return s;

	i = iter->link;
	j = iter->hash_idx + 1;

	do {
		struct nl_portid_hash *hash = &nl_table[i].hash;

		for (; j <= hash->mask; j++) {
			s = sk_head(&hash->table[j]);
			while (s && sock_net(s) != seq_file_net(seq))
				s = sk_next(s);
			if (s) {
				iter->link = i;
				iter->hash_idx = j;
				return s;
			}
		}

		j = 0;
	} while (++i < MAX_LINKS);

	return NULL;
}
static void netlink_seq_stop(struct seq_file *seq, void *v)
	__releases(nl_table_lock)
{
	read_unlock(&nl_table_lock);
}
static int netlink_seq_show(struct seq_file *seq, void *v)
{
	if (v == SEQ_START_TOKEN) {
		seq_puts(seq,
			 "sk       Eth Pid    Groups   "
			 "Rmem     Wmem     Dump     Locks    Drops    Inode\n");
	} else {
		struct sock *s = v;
		struct netlink_sock *nlk = nlk_sk(s);

		seq_printf(seq, "%pK %-3d %-6u %08x %-8d %-8d %pK %-8d %-8d %-8lu\n",
			   s,
			   s->sk_protocol,
			   nlk->portid,
			   nlk->groups ? (u32)nlk->groups[0] : 0,
			   sk_rmem_alloc_get(s),
			   sk_wmem_alloc_get(s),
			   nlk->cb,
			   atomic_read(&s->sk_refcnt),
			   atomic_read(&s->sk_drops),
			   sock_i_ino(s)
			);
	}
	return 0;
}
static const struct seq_operations netlink_seq_ops = {
	.start  = netlink_seq_start,
	.next   = netlink_seq_next,
	.stop   = netlink_seq_stop,
	.show   = netlink_seq_show,
};
static int netlink_seq_open(struct inode *inode, struct file *file)
{
	return seq_open_net(inode, file, &netlink_seq_ops,
			    sizeof(struct nl_seq_iter));
}
static const struct file_operations netlink_seq_fops = {
	.owner		= THIS_MODULE,
	.open		= netlink_seq_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release_net,
};

#endif
int netlink_register_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_register(&netlink_chain, nb);
}
EXPORT_SYMBOL(netlink_register_notifier);

int netlink_unregister_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_unregister(&netlink_chain, nb);
}
EXPORT_SYMBOL(netlink_unregister_notifier);
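/*
 * The chain above fires NETLINK_URELEASE when a bound socket goes away;
 * subscribers typically use it to drop per-portid state. Sketch;
 * my_netlink_event() and forget_portid() are illustrative:
 *
 *	static int my_netlink_event(struct notifier_block *nb,
 *				    unsigned long event, void *ptr)
 *	{
 *		struct netlink_notify *n = ptr;
 *
 *		if (event == NETLINK_URELEASE && n->protocol == NETLINK_ROUTE)
 *			forget_portid(n->portid);
 *		return NOTIFY_DONE;
 *	}
 *
 *	static struct notifier_block my_nb = {
 *		.notifier_call = my_netlink_event,
 *	};
 *
 *	netlink_register_notifier(&my_nb);
 */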
static const struct proto_ops netlink_ops = {
	.family =	PF_NETLINK,
	.owner =	THIS_MODULE,
	.release =	netlink_release,
	.bind =		netlink_bind,
	.connect =	netlink_connect,
	.socketpair =	sock_no_socketpair,
	.accept =	sock_no_accept,
	.getname =	netlink_getname,
	.poll =		datagram_poll,
	.ioctl =	sock_no_ioctl,
	.listen =	sock_no_listen,
	.shutdown =	sock_no_shutdown,
	.setsockopt =	netlink_setsockopt,
	.getsockopt =	netlink_getsockopt,
	.sendmsg =	netlink_sendmsg,
	.recvmsg =	netlink_recvmsg,
	.mmap =		netlink_mmap,
	.sendpage =	sock_no_sendpage,
};
static const struct net_proto_family netlink_family_ops = {
	.family = PF_NETLINK,
	.create = netlink_create,
	.owner	= THIS_MODULE,	/* for consistency 8) */
};
static int __net_init netlink_net_init(struct net *net)
{
#ifdef CONFIG_PROC_FS
	if (!proc_create("netlink", 0, net->proc_net, &netlink_seq_fops))
		return -ENOMEM;
#endif
	return 0;
}

static void __net_exit netlink_net_exit(struct net *net)
{
#ifdef CONFIG_PROC_FS
	remove_proc_entry("netlink", net->proc_net);
#endif
}
static void __init netlink_add_usersock_entry(void)
{
	struct listeners *listeners;
	int groups = 32;

	listeners = kzalloc(sizeof(*listeners) + NLGRPSZ(groups), GFP_KERNEL);
	if (!listeners)
		panic("netlink_add_usersock_entry: Cannot allocate listeners\n");

	netlink_table_grab();

	nl_table[NETLINK_USERSOCK].groups = groups;
	rcu_assign_pointer(nl_table[NETLINK_USERSOCK].listeners, listeners);
	nl_table[NETLINK_USERSOCK].module = THIS_MODULE;
	nl_table[NETLINK_USERSOCK].registered = 1;
	nl_table[NETLINK_USERSOCK].flags = NL_CFG_F_NONROOT_SEND;

	netlink_table_ungrab();
}
static struct pernet_operations __net_initdata netlink_net_ops = {
	.init = netlink_net_init,
	.exit = netlink_net_exit,
};
static int __init netlink_proto_init(void)
{
	int i;
	unsigned long limit;
	unsigned int order;
	int err = proto_register(&netlink_proto, 0);

	if (err != 0)
		goto out;

	BUILD_BUG_ON(sizeof(struct netlink_skb_parms) > FIELD_SIZEOF(struct sk_buff, cb));

	nl_table = kcalloc(MAX_LINKS, sizeof(*nl_table), GFP_KERNEL);
	if (!nl_table)
		goto panic;

	if (totalram_pages >= (128 * 1024))
		limit = totalram_pages >> (21 - PAGE_SHIFT);
	else
		limit = totalram_pages >> (23 - PAGE_SHIFT);

	order = get_bitmask_order(limit) - 1 + PAGE_SHIFT;
	limit = (1UL << order) / sizeof(struct hlist_head);
	order = get_bitmask_order(min(limit, (unsigned long)UINT_MAX)) - 1;

	for (i = 0; i < MAX_LINKS; i++) {
		struct nl_portid_hash *hash = &nl_table[i].hash;

		hash->table = nl_portid_hash_zalloc(1 * sizeof(*hash->table));
		if (!hash->table) {
			while (i-- > 0)
				nl_portid_hash_free(nl_table[i].hash.table,
						    1 * sizeof(*hash->table));
			kfree(nl_table);
			goto panic;
		}
		hash->max_shift = order;
		hash->shift = 0;
		hash->mask = 0;
		hash->rehash_time = jiffies;
	}

	netlink_add_usersock_entry();

	sock_register(&netlink_family_ops);
	register_pernet_subsys(&netlink_net_ops);
	/* The netlink device handler may be needed early. */
	rtnetlink_init();
out:
	return err;
panic:
	panic("netlink_init: Cannot allocate nl_table\n");
}
core_initcall(netlink_proto_init);