/*
 * NETLINK	Kernel-user communication protocol.
 *
 * 		Authors:	Alan Cox <alan@lxorguk.ukuu.org.uk>
 * 				Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 * Tue Jun 26 14:36:48 MEST 2001 Herbert "herp" Rosmanith
 *                               added netlink_proto_exit
 * Tue Jan 22 18:32:44 BRST 2002 Arnaldo C. de Melo <acme@conectiva.com.br>
 * 				 use nlk_sk, as sk->protinfo is on a diet 8)
 * Fri Jul 22 19:51:12 MEST 2005 Harald Welte <laforge@gnumonks.org>
 * 				 - inc module use count of module that owns
 * 				   the kernel socket in case userspace opens
 * 				   socket of same protocol
 * 				 - remove all module support, since netlink is
 * 				   mandatory if CONFIG_NET=y these days
 */
#include <linux/module.h>

#include <linux/capability.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/stat.h>
#include <linux/socket.h>
#include <linux/fcntl.h>
#include <linux/termios.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/slab.h>
#include <asm/uaccess.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/rtnetlink.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/notifier.h>
#include <linux/security.h>
#include <linux/jhash.h>
#include <linux/jiffies.h>
#include <linux/random.h>
#include <linux/bitops.h>
#include <linux/types.h>
#include <linux/audit.h>
#include <linux/mutex.h>
#include <linux/vmalloc.h>
#include <asm/cacheflush.h>

#include <net/net_namespace.h>
#include <net/sock.h>
#include <net/scm.h>
#include <net/netlink.h>

#include "af_netlink.h"
struct listeners {
	struct rcu_head		rcu;
	unsigned long		masks[0];
};

/* state bits */
#define NETLINK_CONGESTED	0x0

/* flags */
#define NETLINK_KERNEL_SOCKET	0x1
#define NETLINK_RECV_PKTINFO	0x2
#define NETLINK_BROADCAST_SEND_ERROR	0x4
#define NETLINK_RECV_NO_ENOBUFS	0x8

static inline int netlink_is_kernel(struct sock *sk)
{
	return nlk_sk(sk)->flags & NETLINK_KERNEL_SOCKET;
}
struct netlink_table *nl_table;
EXPORT_SYMBOL_GPL(nl_table);

static DECLARE_WAIT_QUEUE_HEAD(nl_table_wait);

static int netlink_dump(struct sock *sk);
static void netlink_skb_destructor(struct sk_buff *skb);

DEFINE_RWLOCK(nl_table_lock);
EXPORT_SYMBOL_GPL(nl_table_lock);
static atomic_t nl_table_users = ATOMIC_INIT(0);

#define nl_deref_protected(X) rcu_dereference_protected(X, lockdep_is_held(&nl_table_lock));

static ATOMIC_NOTIFIER_HEAD(netlink_chain);
static inline u32 netlink_group_mask(u32 group)
{
	return group ? 1 << (group - 1) : 0;
}

static inline struct hlist_head *nl_portid_hashfn(struct nl_portid_hash *hash, u32 portid)
{
	return &hash->table[jhash_1word(portid, hash->rnd) & hash->mask];
}
#ifdef CONFIG_NETLINK_MMAP
static bool netlink_skb_is_mmaped(const struct sk_buff *skb)
{
	return NETLINK_CB(skb).flags & NETLINK_SKB_MMAPED;
}

static bool netlink_tx_is_mmaped(struct sock *sk)
{
	return nlk_sk(sk)->tx_ring.pg_vec != NULL;
}

static __pure struct page *pgvec_to_page(const void *addr)
{
	if (is_vmalloc_addr(addr))
		return vmalloc_to_page(addr);
	else
		return virt_to_page(addr);
}
static void free_pg_vec(void **pg_vec, unsigned int order, unsigned int len)
{
	unsigned int i;

	for (i = 0; i < len; i++) {
		if (pg_vec[i] != NULL) {
			if (is_vmalloc_addr(pg_vec[i]))
				vfree(pg_vec[i]);
			else
				free_pages((unsigned long)pg_vec[i], order);
		}
	}
}
static void *alloc_one_pg_vec_page(unsigned long order)
{
	void *buffer;
	gfp_t gfp_flags = GFP_KERNEL | __GFP_COMP | __GFP_ZERO |
			  __GFP_NOWARN | __GFP_NORETRY;

	buffer = (void *)__get_free_pages(gfp_flags, order);
	if (buffer != NULL)
		return buffer;

	buffer = vzalloc((1 << order) * PAGE_SIZE);
	if (buffer != NULL)
		return buffer;

	gfp_flags &= ~__GFP_NORETRY;
	return (void *)__get_free_pages(gfp_flags, order);
}
static void **alloc_pg_vec(struct netlink_sock *nlk,
			   struct nl_mmap_req *req, unsigned int order)
{
	unsigned int block_nr = req->nm_block_nr;
	unsigned int i;
	void **pg_vec, *ptr;

	pg_vec = kcalloc(block_nr, sizeof(void *), GFP_KERNEL);
	if (pg_vec == NULL)
		return NULL;

	for (i = 0; i < block_nr; i++) {
		pg_vec[i] = ptr = alloc_one_pg_vec_page(order);
		if (pg_vec[i] == NULL)
			goto err1;
	}

	return pg_vec;
err1:
	free_pg_vec(pg_vec, order, block_nr);
	return NULL;
}
static int netlink_set_ring(struct sock *sk, struct nl_mmap_req *req,
			    bool closing, bool tx_ring)
{
	struct netlink_sock *nlk = nlk_sk(sk);
	struct netlink_ring *ring;
	struct sk_buff_head *queue;
	void **pg_vec = NULL;
	unsigned int order = 0;
	int err;

	ring  = tx_ring ? &nlk->tx_ring : &nlk->rx_ring;
	queue = tx_ring ? &sk->sk_write_queue : &sk->sk_receive_queue;

	if (!closing) {
		if (atomic_read(&nlk->mapped))
			return -EBUSY;
		if (atomic_read(&ring->pending))
			return -EBUSY;
	}

	if (req->nm_block_nr) {
		if (ring->pg_vec != NULL)
			return -EBUSY;
		if ((int)req->nm_block_size <= 0)
			return -EINVAL;
		if (!IS_ALIGNED(req->nm_block_size, PAGE_SIZE))
			return -EINVAL;
		if (req->nm_frame_size < NL_MMAP_HDRLEN)
			return -EINVAL;
		if (!IS_ALIGNED(req->nm_frame_size, NL_MMAP_MSG_ALIGNMENT))
			return -EINVAL;

		ring->frames_per_block = req->nm_block_size /
					 req->nm_frame_size;
		if (ring->frames_per_block == 0)
			return -EINVAL;
		if (ring->frames_per_block * req->nm_block_nr !=
		    req->nm_frame_nr)
			return -EINVAL;

		order = get_order(req->nm_block_size);
		pg_vec = alloc_pg_vec(nlk, req, order);
		if (pg_vec == NULL)
			return -ENOMEM;
	} else {
		if (req->nm_frame_nr)
			return -EINVAL;
	}

	err = -EBUSY;
	mutex_lock(&nlk->pg_vec_lock);
	if (closing || atomic_read(&nlk->mapped) == 0) {
		err = 0;
		spin_lock_bh(&queue->lock);

		ring->frame_max		= req->nm_frame_nr - 1;
		ring->head		= 0;
		ring->frame_size	= req->nm_frame_size;
		ring->pg_vec_pages	= req->nm_block_size / PAGE_SIZE;

		swap(ring->pg_vec_len, req->nm_block_nr);
		swap(ring->pg_vec_order, order);
		swap(ring->pg_vec, pg_vec);

		__skb_queue_purge(queue);
		spin_unlock_bh(&queue->lock);

		WARN_ON(atomic_read(&nlk->mapped));
	}
	mutex_unlock(&nlk->pg_vec_lock);

	if (pg_vec)
		free_pg_vec(pg_vec, order, req->nm_block_nr);
	return err;
}
static void netlink_mm_open(struct vm_area_struct *vma)
{
	struct file *file = vma->vm_file;
	struct socket *sock = file->private_data;
	struct sock *sk = sock->sk;

	if (sk)
		atomic_inc(&nlk_sk(sk)->mapped);
}

static void netlink_mm_close(struct vm_area_struct *vma)
{
	struct file *file = vma->vm_file;
	struct socket *sock = file->private_data;
	struct sock *sk = sock->sk;

	if (sk)
		atomic_dec(&nlk_sk(sk)->mapped);
}

static const struct vm_operations_struct netlink_mmap_ops = {
	.open	= netlink_mm_open,
	.close	= netlink_mm_close,
};
static int netlink_mmap(struct file *file, struct socket *sock,
			struct vm_area_struct *vma)
{
	struct sock *sk = sock->sk;
	struct netlink_sock *nlk = nlk_sk(sk);
	struct netlink_ring *ring;
	unsigned long start, size, expected;
	unsigned int i;
	int err = -EINVAL;

	mutex_lock(&nlk->pg_vec_lock);

	expected = 0;
	for (ring = &nlk->rx_ring; ring <= &nlk->tx_ring; ring++) {
		if (ring->pg_vec == NULL)
			continue;
		expected += ring->pg_vec_len * ring->pg_vec_pages * PAGE_SIZE;
	}

	size = vma->vm_end - vma->vm_start;
	if (size != expected)
		goto out;

	start = vma->vm_start;
	for (ring = &nlk->rx_ring; ring <= &nlk->tx_ring; ring++) {
		if (ring->pg_vec == NULL)
			continue;

		for (i = 0; i < ring->pg_vec_len; i++) {
			struct page *page;
			void *kaddr = ring->pg_vec[i];
			unsigned int pg_num;

			for (pg_num = 0; pg_num < ring->pg_vec_pages; pg_num++) {
				page = pgvec_to_page(kaddr);
				err = vm_insert_page(vma, start, page);
				if (err < 0)
					goto out;
				start += PAGE_SIZE;
				kaddr += PAGE_SIZE;
			}
		}
	}

	atomic_inc(&nlk->mapped);
	vma->vm_ops = &netlink_mmap_ops;
	err = 0;
out:
	mutex_unlock(&nlk->pg_vec_lock);
	return err;
}
static void netlink_frame_flush_dcache(const struct nl_mmap_hdr *hdr)
{
#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE == 1
	struct page *p_start, *p_end;

	/* First page is flushed through netlink_{get,set}_status */
	p_start = pgvec_to_page(hdr + PAGE_SIZE);
	p_end   = pgvec_to_page((void *)hdr + NL_MMAP_MSG_HDRLEN + hdr->nm_len - 1);
	while (p_start <= p_end) {
		flush_dcache_page(p_start);
		p_start++;
	}
#endif
}

static enum nl_mmap_status netlink_get_status(const struct nl_mmap_hdr *hdr)
{
	flush_dcache_page(pgvec_to_page(hdr));
	return hdr->nm_status;
}

static void netlink_set_status(struct nl_mmap_hdr *hdr,
			       enum nl_mmap_status status)
{
	hdr->nm_status = status;
	flush_dcache_page(pgvec_to_page(hdr));
}
static struct nl_mmap_hdr *
__netlink_lookup_frame(const struct netlink_ring *ring, unsigned int pos)
{
	unsigned int pg_vec_pos, frame_off;

	pg_vec_pos = pos / ring->frames_per_block;
	frame_off  = pos % ring->frames_per_block;

	return ring->pg_vec[pg_vec_pos] + (frame_off * ring->frame_size);
}

static struct nl_mmap_hdr *
netlink_lookup_frame(const struct netlink_ring *ring, unsigned int pos,
		     enum nl_mmap_status status)
{
	struct nl_mmap_hdr *hdr;

	hdr = __netlink_lookup_frame(ring, pos);
	if (netlink_get_status(hdr) != status)
		return NULL;

	return hdr;
}

static struct nl_mmap_hdr *
netlink_current_frame(const struct netlink_ring *ring,
		      enum nl_mmap_status status)
{
	return netlink_lookup_frame(ring, ring->head, status);
}

static struct nl_mmap_hdr *
netlink_previous_frame(const struct netlink_ring *ring,
		       enum nl_mmap_status status)
{
	unsigned int prev;

	prev = ring->head ? ring->head - 1 : ring->frame_max;
	return netlink_lookup_frame(ring, prev, status);
}

static void netlink_increment_head(struct netlink_ring *ring)
{
	ring->head = ring->head != ring->frame_max ? ring->head + 1 : 0;
}

static void netlink_forward_ring(struct netlink_ring *ring)
{
	unsigned int head = ring->head, pos = head;
	const struct nl_mmap_hdr *hdr;

	do {
		hdr = __netlink_lookup_frame(ring, pos);
		if (hdr->nm_status == NL_MMAP_STATUS_UNUSED)
			break;
		if (hdr->nm_status != NL_MMAP_STATUS_SKIP)
			break;
		netlink_increment_head(ring);
	} while (ring->head != head);
}
static unsigned int netlink_poll(struct file *file, struct socket *sock,
				 poll_table *wait)
{
	struct sock *sk = sock->sk;
	struct netlink_sock *nlk = nlk_sk(sk);
	unsigned int mask;

	if (nlk->cb != NULL && nlk->rx_ring.pg_vec != NULL)
		netlink_dump(sk);

	mask = datagram_poll(file, sock, wait);

	spin_lock_bh(&sk->sk_receive_queue.lock);
	if (nlk->rx_ring.pg_vec) {
		netlink_forward_ring(&nlk->rx_ring);
		if (!netlink_previous_frame(&nlk->rx_ring, NL_MMAP_STATUS_UNUSED))
			mask |= POLLIN | POLLRDNORM;
	}
	spin_unlock_bh(&sk->sk_receive_queue.lock);

	spin_lock_bh(&sk->sk_write_queue.lock);
	if (nlk->tx_ring.pg_vec) {
		if (netlink_current_frame(&nlk->tx_ring, NL_MMAP_STATUS_UNUSED))
			mask |= POLLOUT | POLLWRNORM;
	}
	spin_unlock_bh(&sk->sk_write_queue.lock);

	return mask;
}
static struct nl_mmap_hdr *netlink_mmap_hdr(struct sk_buff *skb)
{
	return (struct nl_mmap_hdr *)(skb->head - NL_MMAP_HDRLEN);
}

static void netlink_ring_setup_skb(struct sk_buff *skb, struct sock *sk,
				   struct netlink_ring *ring,
				   struct nl_mmap_hdr *hdr)
{
	unsigned int size;
	void *data;

	size = ring->frame_size - NL_MMAP_HDRLEN;
	data = (void *)hdr + NL_MMAP_HDRLEN;

	skb->head	= data;
	skb->data	= data;
	skb_reset_tail_pointer(skb);
	skb->end	= skb->tail + size;
	skb->len	= 0;

	skb->destructor	= netlink_skb_destructor;
	NETLINK_CB(skb).flags |= NETLINK_SKB_MMAPED;
	NETLINK_CB(skb).sk = sk;
}
static int netlink_mmap_sendmsg(struct sock *sk, struct msghdr *msg,
				u32 dst_portid, u32 dst_group,
				struct sock_iocb *siocb)
{
	struct netlink_sock *nlk = nlk_sk(sk);
	struct netlink_ring *ring;
	struct nl_mmap_hdr *hdr;
	struct sk_buff *skb;
	unsigned int maxlen;
	bool excl = true;
	int err = 0, len = 0;

	/* Netlink messages are validated by the receiver before processing.
	 * In order to avoid userspace changing the contents of the message
	 * after validation, the socket and the ring may only be used by a
	 * single process, otherwise we fall back to copying.
	 */
	if (atomic_long_read(&sk->sk_socket->file->f_count) > 2 ||
	    atomic_read(&nlk->mapped) > 1)
		excl = false;

	mutex_lock(&nlk->pg_vec_lock);

	ring   = &nlk->tx_ring;
	maxlen = ring->frame_size - NL_MMAP_HDRLEN;

	do {
		hdr = netlink_current_frame(ring, NL_MMAP_STATUS_VALID);
		if (hdr == NULL) {
			if (!(msg->msg_flags & MSG_DONTWAIT) &&
			    atomic_read(&nlk->tx_ring.pending))
				schedule();
			continue;
		}
		if (hdr->nm_len > maxlen) {
			err = -EINVAL;
			goto out;
		}

		netlink_frame_flush_dcache(hdr);

		if (likely(dst_portid == 0 && dst_group == 0 && excl)) {
			skb = alloc_skb_head(GFP_KERNEL);
			if (skb == NULL) {
				err = -ENOBUFS;
				goto out;
			}
			sock_hold(sk);
			netlink_ring_setup_skb(skb, sk, ring, hdr);
			NETLINK_CB(skb).flags |= NETLINK_SKB_TX;
			__skb_put(skb, hdr->nm_len);
			netlink_set_status(hdr, NL_MMAP_STATUS_RESERVED);
			atomic_inc(&ring->pending);
		} else {
			skb = alloc_skb(hdr->nm_len, GFP_KERNEL);
			if (skb == NULL) {
				err = -ENOBUFS;
				goto out;
			}
			__skb_put(skb, hdr->nm_len);
			memcpy(skb->data, (void *)hdr + NL_MMAP_HDRLEN, hdr->nm_len);
			netlink_set_status(hdr, NL_MMAP_STATUS_UNUSED);
		}

		netlink_increment_head(ring);

		NETLINK_CB(skb).portid	  = nlk->portid;
		NETLINK_CB(skb).dst_group = dst_group;
		NETLINK_CB(skb).creds	  = siocb->scm->creds;

		err = security_netlink_send(sk, skb);
		if (err) {
			kfree_skb(skb);
			goto out;
		}

		if (unlikely(dst_group)) {
			atomic_inc(&skb->users);
			netlink_broadcast(sk, skb, dst_portid, dst_group,
					  GFP_KERNEL);
		}
		err = netlink_unicast(sk, skb, dst_portid,
				      msg->msg_flags & MSG_DONTWAIT);
		if (err < 0)
			goto out;
		len += err;

	} while (hdr != NULL ||
		 (!(msg->msg_flags & MSG_DONTWAIT) &&
		  atomic_read(&nlk->tx_ring.pending)));

	if (err == 0)
		err = len;
out:
	mutex_unlock(&nlk->pg_vec_lock);
	return err;
}
#else /* CONFIG_NETLINK_MMAP */
#define netlink_skb_is_mmaped(skb)	false
#define netlink_tx_is_mmaped(sk)	false
#define netlink_mmap			sock_no_mmap
#define netlink_poll			datagram_poll
#define netlink_mmap_sendmsg(sk, msg, dst_portid, dst_group, siocb)	0
#endif /* CONFIG_NETLINK_MMAP */
static void netlink_destroy_callback(struct netlink_callback *cb)
{
	kfree_skb(cb->skb);
	kfree(cb);
}

static void netlink_consume_callback(struct netlink_callback *cb)
{
	consume_skb(cb->skb);
	kfree(cb);
}
static void netlink_skb_destructor(struct sk_buff *skb)
{
#ifdef CONFIG_NETLINK_MMAP
	struct nl_mmap_hdr *hdr;
	struct netlink_ring *ring;
	struct sock *sk;

	/* If a packet from the kernel to userspace was freed because of an
	 * error without being delivered to userspace, the kernel must reset
	 * the status. In the direction userspace to kernel, the status is
	 * always reset here after the packet was processed and freed.
	 */
	if (netlink_skb_is_mmaped(skb)) {
		hdr = netlink_mmap_hdr(skb);
		sk = NETLINK_CB(skb).sk;

		if (NETLINK_CB(skb).flags & NETLINK_SKB_TX) {
			netlink_set_status(hdr, NL_MMAP_STATUS_UNUSED);
			ring = &nlk_sk(sk)->tx_ring;
		} else {
			if (!(NETLINK_CB(skb).flags & NETLINK_SKB_DELIVERED)) {
				hdr->nm_len = 0;
				netlink_set_status(hdr, NL_MMAP_STATUS_VALID);
			}
			ring = &nlk_sk(sk)->rx_ring;
		}

		WARN_ON(atomic_read(&ring->pending) == 0);
		atomic_dec(&ring->pending);
		sock_put(sk);

		skb->head = NULL;
	}
#endif
	if (skb->sk != NULL)
		sock_rfree(skb);
}
static void netlink_skb_set_owner_r(struct sk_buff *skb, struct sock *sk)
{
	WARN_ON(skb->sk != NULL);
	skb->sk = sk;
	skb->destructor = netlink_skb_destructor;
	atomic_add(skb->truesize, &sk->sk_rmem_alloc);
	sk_mem_charge(sk, skb->truesize);
}
static void netlink_sock_destruct(struct sock *sk)
{
	struct netlink_sock *nlk = nlk_sk(sk);

	if (nlk->cb) {
		if (nlk->cb->done)
			nlk->cb->done(nlk->cb);

		module_put(nlk->cb->module);
		netlink_destroy_callback(nlk->cb);
	}

	skb_queue_purge(&sk->sk_receive_queue);
#ifdef CONFIG_NETLINK_MMAP
	if (1) {
		struct nl_mmap_req req;

		memset(&req, 0, sizeof(req));
		if (nlk->rx_ring.pg_vec)
			netlink_set_ring(sk, &req, true, false);
		memset(&req, 0, sizeof(req));
		if (nlk->tx_ring.pg_vec)
			netlink_set_ring(sk, &req, true, true);
	}
#endif /* CONFIG_NETLINK_MMAP */

	if (!sock_flag(sk, SOCK_DEAD)) {
		printk(KERN_ERR "Freeing alive netlink socket %p\n", sk);
		return;
	}

	WARN_ON(atomic_read(&sk->sk_rmem_alloc));
	WARN_ON(atomic_read(&sk->sk_wmem_alloc));
	WARN_ON(nlk_sk(sk)->groups);
}
/* This lock without WQ_FLAG_EXCLUSIVE is good on UP and it is _very_ bad on
 * SMP. Look, when several writers sleep and reader wakes them up, all but one
 * immediately hit write lock and grab all the cpus. Exclusive sleep solves
 * this, _but_ remember, it adds useless work on UP machines.
 */

void netlink_table_grab(void)
	__acquires(nl_table_lock)
{
	might_sleep();

	write_lock_irq(&nl_table_lock);

	if (atomic_read(&nl_table_users)) {
		DECLARE_WAITQUEUE(wait, current);

		add_wait_queue_exclusive(&nl_table_wait, &wait);
		for (;;) {
			set_current_state(TASK_UNINTERRUPTIBLE);
			if (atomic_read(&nl_table_users) == 0)
				break;
			write_unlock_irq(&nl_table_lock);
			schedule();
			write_lock_irq(&nl_table_lock);
		}

		__set_current_state(TASK_RUNNING);
		remove_wait_queue(&nl_table_wait, &wait);
	}
}

void netlink_table_ungrab(void)
	__releases(nl_table_lock)
{
	write_unlock_irq(&nl_table_lock);
	wake_up(&nl_table_wait);
}
static inline void
netlink_lock_table(void)
{
	/* read_lock() synchronizes us to netlink_table_grab */

	read_lock(&nl_table_lock);
	atomic_inc(&nl_table_users);
	read_unlock(&nl_table_lock);
}

static inline void
netlink_unlock_table(void)
{
	if (atomic_dec_and_test(&nl_table_users))
		wake_up(&nl_table_wait);
}
static struct sock *netlink_lookup(struct net *net, int protocol, u32 portid)
{
	struct nl_portid_hash *hash = &nl_table[protocol].hash;
	struct hlist_head *head;
	struct sock *sk;

	read_lock(&nl_table_lock);
	head = nl_portid_hashfn(hash, portid);
	sk_for_each(sk, head) {
		if (net_eq(sock_net(sk), net) && (nlk_sk(sk)->portid == portid)) {
			sock_hold(sk);
			goto found;
		}
	}
	sk = NULL;
found:
	read_unlock(&nl_table_lock);
	return sk;
}
static struct hlist_head *nl_portid_hash_zalloc(size_t size)
{
	if (size <= PAGE_SIZE)
		return kzalloc(size, GFP_ATOMIC);
	else
		return (struct hlist_head *)
			__get_free_pages(GFP_ATOMIC | __GFP_ZERO,
					 get_order(size));
}

static void nl_portid_hash_free(struct hlist_head *table, size_t size)
{
	if (size <= PAGE_SIZE)
		kfree(table);
	else
		free_pages((unsigned long)table, get_order(size));
}
static int nl_portid_hash_rehash(struct nl_portid_hash *hash, int grow)
{
	unsigned int omask, mask, shift;
	size_t osize, size;
	struct hlist_head *otable, *table;
	int i;

	omask = mask = hash->mask;
	osize = size = (mask + 1) * sizeof(*table);
	shift = hash->shift;

	if (grow) {
		if (++shift > hash->max_shift)
			return 0;
		mask = mask * 2 + 1;
		size *= 2;
	}

	table = nl_portid_hash_zalloc(size);
	if (!table)
		return 0;

	otable = hash->table;
	hash->table = table;
	hash->mask = mask;
	hash->shift = shift;
	get_random_bytes(&hash->rnd, sizeof(hash->rnd));

	for (i = 0; i <= omask; i++) {
		struct sock *sk;
		struct hlist_node *tmp;

		sk_for_each_safe(sk, tmp, &otable[i])
			__sk_add_node(sk, nl_portid_hashfn(hash, nlk_sk(sk)->portid));
	}

	nl_portid_hash_free(otable, osize);
	hash->rehash_time = jiffies + 10 * 60 * HZ;
	return 1;
}
static inline int nl_portid_hash_dilute(struct nl_portid_hash *hash, int len)
{
	int avg = hash->entries >> hash->shift;

	if (unlikely(avg > 1) && nl_portid_hash_rehash(hash, 1))
		return 1;

	if (unlikely(len > avg) && time_after(jiffies, hash->rehash_time)) {
		nl_portid_hash_rehash(hash, 0);
		return 1;
	}

	return 0;
}
static const struct proto_ops netlink_ops;

static void
netlink_update_listeners(struct sock *sk)
{
	struct netlink_table *tbl = &nl_table[sk->sk_protocol];
	unsigned long mask;
	unsigned int i;
	struct listeners *listeners;

	listeners = nl_deref_protected(tbl->listeners);
	if (!listeners)
		return;

	for (i = 0; i < NLGRPLONGS(tbl->groups); i++) {
		mask = 0;
		sk_for_each_bound(sk, &tbl->mc_list) {
			if (i < NLGRPLONGS(nlk_sk(sk)->ngroups))
				mask |= nlk_sk(sk)->groups[i];
		}
		listeners->masks[i] = mask;
	}
	/* this function is only called with the netlink table "grabbed", which
	 * makes sure updates are visible before bind or setsockopt return. */
}
static int netlink_insert(struct sock *sk, struct net *net, u32 portid)
{
	struct nl_portid_hash *hash = &nl_table[sk->sk_protocol].hash;
	struct hlist_head *head;
	int err = -EADDRINUSE;
	struct sock *osk;
	int len;

	netlink_table_grab();
	head = nl_portid_hashfn(hash, portid);
	len = 0;
	sk_for_each(osk, head) {
		if (net_eq(sock_net(osk), net) && (nlk_sk(osk)->portid == portid))
			break;
		len++;
	}
	if (osk)
		goto err;

	err = -EBUSY;
	if (nlk_sk(sk)->portid)
		goto err;

	err = -ENOMEM;
	if (BITS_PER_LONG > 32 && unlikely(hash->entries >= UINT_MAX))
		goto err;

	if (len && nl_portid_hash_dilute(hash, len))
		head = nl_portid_hashfn(hash, portid);
	hash->entries++;
	nlk_sk(sk)->portid = portid;
	sk_add_node(sk, head);
	err = 0;

err:
	netlink_table_ungrab();
	return err;
}
static void netlink_remove(struct sock *sk)
{
	netlink_table_grab();
	if (sk_del_node_init(sk))
		nl_table[sk->sk_protocol].hash.entries--;
	if (nlk_sk(sk)->subscriptions)
		__sk_del_bind_node(sk);
	netlink_table_ungrab();
}
static struct proto netlink_proto = {
	.name	  = "NETLINK",
	.owner	  = THIS_MODULE,
	.obj_size = sizeof(struct netlink_sock),
};
static int __netlink_create(struct net *net, struct socket *sock,
			    struct mutex *cb_mutex, int protocol)
{
	struct sock *sk;
	struct netlink_sock *nlk;

	sock->ops = &netlink_ops;

	sk = sk_alloc(net, PF_NETLINK, GFP_KERNEL, &netlink_proto);
	if (!sk)
		return -ENOMEM;

	sock_init_data(sock, sk);

	nlk = nlk_sk(sk);
	if (cb_mutex) {
		nlk->cb_mutex = cb_mutex;
	} else {
		nlk->cb_mutex = &nlk->cb_def_mutex;
		mutex_init(nlk->cb_mutex);
	}
	init_waitqueue_head(&nlk->wait);
#ifdef CONFIG_NETLINK_MMAP
	mutex_init(&nlk->pg_vec_lock);
#endif

	sk->sk_destruct = netlink_sock_destruct;
	sk->sk_protocol = protocol;
	return 0;
}
static int netlink_create(struct net *net, struct socket *sock, int protocol,
			  int kern)
{
	struct module *module = NULL;
	struct mutex *cb_mutex;
	struct netlink_sock *nlk;
	void (*bind)(int group);
	int err = 0;

	sock->state = SS_UNCONNECTED;

	if (sock->type != SOCK_RAW && sock->type != SOCK_DGRAM)
		return -ESOCKTNOSUPPORT;

	if (protocol < 0 || protocol >= MAX_LINKS)
		return -EPROTONOSUPPORT;

	netlink_lock_table();
#ifdef CONFIG_MODULES
	if (!nl_table[protocol].registered) {
		netlink_unlock_table();
		request_module("net-pf-%d-proto-%d", PF_NETLINK, protocol);
		netlink_lock_table();
	}
#endif
	if (nl_table[protocol].registered &&
	    try_module_get(nl_table[protocol].module))
		module = nl_table[protocol].module;
	else
		err = -EPROTONOSUPPORT;
	cb_mutex = nl_table[protocol].cb_mutex;
	bind = nl_table[protocol].bind;
	netlink_unlock_table();

	if (err < 0)
		goto out;

	err = __netlink_create(net, sock, cb_mutex, protocol);
	if (err < 0)
		goto out_module;

	local_bh_disable();
	sock_prot_inuse_add(net, &netlink_proto, 1);
	local_bh_enable();

	nlk = nlk_sk(sock->sk);
	nlk->module = module;
	nlk->netlink_bind = bind;
out:
	return err;

out_module:
	module_put(module);
	goto out;
}
static int netlink_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct netlink_sock *nlk;

	if (!sk)
		return 0;

	netlink_remove(sk);
	sock_orphan(sk);
	nlk = nlk_sk(sk);

	/*
	 * OK. Socket is unlinked, any packets that arrive now
	 * will be purged.
	 */

	sock->sk = NULL;
	wake_up_interruptible_all(&nlk->wait);

	skb_queue_purge(&sk->sk_write_queue);

	if (nlk->portid) {
		struct netlink_notify n = {
						.net = sock_net(sk),
						.protocol = sk->sk_protocol,
						.portid = nlk->portid,
					  };
		atomic_notifier_call_chain(&netlink_chain,
				NETLINK_URELEASE, &n);
	}

	module_put(nlk->module);

	netlink_table_grab();
	if (netlink_is_kernel(sk)) {
		BUG_ON(nl_table[sk->sk_protocol].registered == 0);
		if (--nl_table[sk->sk_protocol].registered == 0) {
			struct listeners *old;

			old = nl_deref_protected(nl_table[sk->sk_protocol].listeners);
			RCU_INIT_POINTER(nl_table[sk->sk_protocol].listeners, NULL);
			kfree_rcu(old, rcu);
			nl_table[sk->sk_protocol].module = NULL;
			nl_table[sk->sk_protocol].bind = NULL;
			nl_table[sk->sk_protocol].flags = 0;
			nl_table[sk->sk_protocol].registered = 0;
		}
	} else if (nlk->subscriptions) {
		netlink_update_listeners(sk);
	}
	netlink_table_ungrab();

	kfree(nlk->groups);
	nlk->groups = NULL;

	local_bh_disable();
	sock_prot_inuse_add(sock_net(sk), &netlink_proto, -1);
	local_bh_enable();
	sock_put(sk);
	return 0;
}
static int netlink_autobind(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct net *net = sock_net(sk);
	struct nl_portid_hash *hash = &nl_table[sk->sk_protocol].hash;
	struct hlist_head *head;
	struct sock *osk;
	s32 portid = task_tgid_vnr(current);
	int err;
	static s32 rover = -4097;

retry:
	cond_resched();
	netlink_table_grab();
	head = nl_portid_hashfn(hash, portid);
	sk_for_each(osk, head) {
		if (!net_eq(sock_net(osk), net))
			continue;
		if (nlk_sk(osk)->portid == portid) {
			/* Bind collision, search negative portid values. */
			portid = rover--;
			if (rover > -4097)
				rover = -4097;
			netlink_table_ungrab();
			goto retry;
		}
	}
	netlink_table_ungrab();

	err = netlink_insert(sk, net, portid);
	if (err == -EADDRINUSE)
		goto retry;

	/* If 2 threads race to autobind, that is fine. */
	if (err == -EBUSY)
		err = 0;

	return err;
}
static inline int netlink_capable(const struct socket *sock, unsigned int flag)
{
	return (nl_table[sock->sk->sk_protocol].flags & flag) ||
		ns_capable(sock_net(sock->sk)->user_ns, CAP_NET_ADMIN);
}

static void
netlink_update_subscriptions(struct sock *sk, unsigned int subscriptions)
{
	struct netlink_sock *nlk = nlk_sk(sk);

	if (nlk->subscriptions && !subscriptions)
		__sk_del_bind_node(sk);
	else if (!nlk->subscriptions && subscriptions)
		sk_add_bind_node(sk, &nl_table[sk->sk_protocol].mc_list);
	nlk->subscriptions = subscriptions;
}
static int netlink_realloc_groups(struct sock *sk)
{
	struct netlink_sock *nlk = nlk_sk(sk);
	unsigned int groups;
	unsigned long *new_groups;
	int err = 0;

	netlink_table_grab();

	groups = nl_table[sk->sk_protocol].groups;
	if (!nl_table[sk->sk_protocol].registered) {
		err = -ENOENT;
		goto out_unlock;
	}

	if (nlk->ngroups >= groups)
		goto out_unlock;

	new_groups = krealloc(nlk->groups, NLGRPSZ(groups), GFP_ATOMIC);
	if (new_groups == NULL) {
		err = -ENOMEM;
		goto out_unlock;
	}
	memset((char *)new_groups + NLGRPSZ(nlk->ngroups), 0,
	       NLGRPSZ(groups) - NLGRPSZ(nlk->ngroups));

	nlk->groups = new_groups;
	nlk->ngroups = groups;
out_unlock:
	netlink_table_ungrab();
	return err;
}
static int netlink_bind(struct socket *sock, struct sockaddr *addr,
			int addr_len)
{
	struct sock *sk = sock->sk;
	struct net *net = sock_net(sk);
	struct netlink_sock *nlk = nlk_sk(sk);
	struct sockaddr_nl *nladdr = (struct sockaddr_nl *)addr;
	int err;

	if (addr_len < sizeof(struct sockaddr_nl))
		return -EINVAL;

	if (nladdr->nl_family != AF_NETLINK)
		return -EINVAL;

	/* Only superuser is allowed to listen multicasts */
	if (nladdr->nl_groups) {
		if (!netlink_capable(sock, NL_CFG_F_NONROOT_RECV))
			return -EPERM;
		err = netlink_realloc_groups(sk);
		if (err)
			return err;
	}

	if (nlk->portid) {
		if (nladdr->nl_pid != nlk->portid)
			return -EINVAL;
	} else {
		err = nladdr->nl_pid ?
			netlink_insert(sk, net, nladdr->nl_pid) :
			netlink_autobind(sock);
		if (err)
			return err;
	}

	if (!nladdr->nl_groups && (nlk->groups == NULL || !(u32)nlk->groups[0]))
		return 0;

	netlink_table_grab();
	netlink_update_subscriptions(sk, nlk->subscriptions +
					 hweight32(nladdr->nl_groups) -
					 hweight32(nlk->groups[0]));
	nlk->groups[0] = (nlk->groups[0] & ~0xffffffffUL) | nladdr->nl_groups;
	netlink_update_listeners(sk);
	netlink_table_ungrab();

	if (nlk->netlink_bind && nlk->groups[0]) {
		int i;

		for (i = 0; i < nlk->ngroups; i++) {
			if (test_bit(i, nlk->groups))
				nlk->netlink_bind(i);
		}
	}

	return 0;
}
static int netlink_connect(struct socket *sock, struct sockaddr *addr,
			   int alen, int flags)
{
	int err = 0;
	struct sock *sk = sock->sk;
	struct netlink_sock *nlk = nlk_sk(sk);
	struct sockaddr_nl *nladdr = (struct sockaddr_nl *)addr;

	if (alen < sizeof(addr->sa_family))
		return -EINVAL;

	if (addr->sa_family == AF_UNSPEC) {
		sk->sk_state	= NETLINK_UNCONNECTED;
		nlk->dst_portid	= 0;
		nlk->dst_group  = 0;
		return 0;
	}
	if (addr->sa_family != AF_NETLINK)
		return -EINVAL;

	/* Only superuser is allowed to send multicasts */
	if (nladdr->nl_groups && !netlink_capable(sock, NL_CFG_F_NONROOT_SEND))
		return -EPERM;

	if (!nlk->portid)
		err = netlink_autobind(sock);

	if (err == 0) {
		sk->sk_state	= NETLINK_CONNECTED;
		nlk->dst_portid = nladdr->nl_pid;
		nlk->dst_group  = ffs(nladdr->nl_groups);
	}

	return err;
}
static int netlink_getname(struct socket *sock, struct sockaddr *addr,
			   int *addr_len, int peer)
{
	struct sock *sk = sock->sk;
	struct netlink_sock *nlk = nlk_sk(sk);
	DECLARE_SOCKADDR(struct sockaddr_nl *, nladdr, addr);

	nladdr->nl_family = AF_NETLINK;
	nladdr->nl_pad = 0;
	*addr_len = sizeof(*nladdr);

	if (peer) {
		nladdr->nl_pid = nlk->dst_portid;
		nladdr->nl_groups = netlink_group_mask(nlk->dst_group);
	} else {
		nladdr->nl_pid = nlk->portid;
		nladdr->nl_groups = nlk->groups ? nlk->groups[0] : 0;
	}
	return 0;
}
static void netlink_overrun(struct sock *sk)
{
	struct netlink_sock *nlk = nlk_sk(sk);

	if (!(nlk->flags & NETLINK_RECV_NO_ENOBUFS)) {
		if (!test_and_set_bit(NETLINK_CONGESTED, &nlk_sk(sk)->state)) {
			sk->sk_err = ENOBUFS;
			sk->sk_error_report(sk);
		}
	}
	atomic_inc(&sk->sk_drops);
}
static struct sock *netlink_getsockbyportid(struct sock *ssk, u32 portid)
{
	struct sock *sock;
	struct netlink_sock *nlk;

	sock = netlink_lookup(sock_net(ssk), ssk->sk_protocol, portid);
	if (!sock)
		return ERR_PTR(-ECONNREFUSED);

	/* Don't bother queuing skb if kernel socket has no input function */
	nlk = nlk_sk(sock);
	if (sock->sk_state == NETLINK_CONNECTED &&
	    nlk->dst_portid != nlk_sk(ssk)->portid) {
		sock_put(sock);
		return ERR_PTR(-ECONNREFUSED);
	}
	return sock;
}
struct sock *netlink_getsockbyfilp(struct file *filp)
{
	struct inode *inode = file_inode(filp);
	struct sock *sock;

	if (!S_ISSOCK(inode->i_mode))
		return ERR_PTR(-ENOTSOCK);

	sock = SOCKET_I(inode)->sk;
	if (sock->sk_family != AF_NETLINK)
		return ERR_PTR(-EINVAL);

	sock_hold(sock);
	return sock;
}
/*
 * Attach a skb to a netlink socket.
 * The caller must hold a reference to the destination socket. On error, the
 * reference is dropped. The skb is not sent to the destination, just all
 * error checks are performed and memory in the queue is reserved.
 * Return values:
 * < 0: error. skb freed, reference to sock dropped.
 * 0: continue
 * 1: repeat lookup - reference dropped while waiting for socket memory.
 */
int netlink_attachskb(struct sock *sk, struct sk_buff *skb,
		      long *timeo, struct sock *ssk)
{
	struct netlink_sock *nlk;

	nlk = nlk_sk(sk);

	if ((atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf ||
	     test_bit(NETLINK_CONGESTED, &nlk->state)) &&
	    !netlink_skb_is_mmaped(skb)) {
		DECLARE_WAITQUEUE(wait, current);
		if (!*timeo) {
			if (!ssk || netlink_is_kernel(ssk))
				netlink_overrun(sk);
			sock_put(sk);
			kfree_skb(skb);
			return -EAGAIN;
		}

		__set_current_state(TASK_INTERRUPTIBLE);
		add_wait_queue(&nlk->wait, &wait);

		if ((atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf ||
		     test_bit(NETLINK_CONGESTED, &nlk->state)) &&
		    !sock_flag(sk, SOCK_DEAD))
			*timeo = schedule_timeout(*timeo);

		__set_current_state(TASK_RUNNING);
		remove_wait_queue(&nlk->wait, &wait);
		sock_put(sk);

		if (signal_pending(current)) {
			kfree_skb(skb);
			return sock_intr_errno(*timeo);
		}
		return 1;
	}
	netlink_skb_set_owner_r(skb, sk);
	return 0;
}
static int __netlink_sendskb(struct sock *sk, struct sk_buff *skb)
{
	int len = skb->len;

	skb_queue_tail(&sk->sk_receive_queue, skb);
	sk->sk_data_ready(sk, len);
	return len;
}

int netlink_sendskb(struct sock *sk, struct sk_buff *skb)
{
	int len = __netlink_sendskb(sk, skb);

	sock_put(sk);
	return len;
}

void netlink_detachskb(struct sock *sk, struct sk_buff *skb)
{
	kfree_skb(skb);
	sock_put(sk);
}
static struct sk_buff *netlink_trim(struct sk_buff *skb, gfp_t allocation)
{
	int delta;

	WARN_ON(skb->sk != NULL);
	if (netlink_skb_is_mmaped(skb))
		return skb;

	delta = skb->end - skb->tail;
	if (delta * 2 < skb->truesize)
		return skb;

	if (skb_shared(skb)) {
		struct sk_buff *nskb = skb_clone(skb, allocation);
		if (!nskb)
			return skb;
		consume_skb(skb);
		skb = nskb;
	}

	if (!pskb_expand_head(skb, 0, -delta, allocation))
		skb->truesize -= delta;

	return skb;
}
static void netlink_rcv_wake(struct sock *sk)
{
	struct netlink_sock *nlk = nlk_sk(sk);

	if (skb_queue_empty(&sk->sk_receive_queue))
		clear_bit(NETLINK_CONGESTED, &nlk->state);
	if (!test_bit(NETLINK_CONGESTED, &nlk->state))
		wake_up_interruptible(&nlk->wait);
}
static int netlink_unicast_kernel(struct sock *sk, struct sk_buff *skb,
				  struct sock *ssk)
{
	int ret;
	struct netlink_sock *nlk = nlk_sk(sk);

	ret = -ECONNREFUSED;
	if (nlk->netlink_rcv != NULL) {
		ret = skb->len;
		netlink_skb_set_owner_r(skb, sk);
		NETLINK_CB(skb).sk = ssk;
		nlk->netlink_rcv(skb);
		consume_skb(skb);
	} else {
		kfree_skb(skb);
	}
	sock_put(sk);
	return ret;
}

int netlink_unicast(struct sock *ssk, struct sk_buff *skb,
		    u32 portid, int nonblock)
{
	struct sock *sk;
	int err;
	long timeo;

	skb = netlink_trim(skb, gfp_any());

	timeo = sock_sndtimeo(ssk, nonblock);
retry:
	sk = netlink_getsockbyportid(ssk, portid);
	if (IS_ERR(sk)) {
		kfree_skb(skb);
		return PTR_ERR(sk);
	}
	if (netlink_is_kernel(sk))
		return netlink_unicast_kernel(sk, skb, ssk);

	if (sk_filter(sk, skb)) {
		err = skb->len;
		kfree_skb(skb);
		sock_put(sk);
		return err;
	}

	err = netlink_attachskb(sk, skb, &timeo, ssk);
	if (err == 1)
		goto retry;
	if (err)
		return err;

	return netlink_sendskb(sk, skb);
}
EXPORT_SYMBOL(netlink_unicast);
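
/*
 * Example (illustrative sketch, not part of this file): a kernel-side
 * responder usually builds a reply with nlmsg_new()/nlmsg_put() from
 * <net/netlink.h> and sends it back to the requesting portid via
 * netlink_unicast(). MY_MSG_TYPE and my_reply() are hypothetical.
 *
 *	static int my_reply(struct sock *my_sk, struct sk_buff *in_skb)
 *	{
 *		struct sk_buff *skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
 *		struct nlmsghdr *nlh;
 *
 *		if (!skb)
 *			return -ENOMEM;
 *		nlh = nlmsg_put(skb, 0, nlmsg_hdr(in_skb)->nlmsg_seq,
 *				MY_MSG_TYPE, 0, 0);
 *		if (!nlh) {
 *			kfree_skb(skb);
 *			return -EMSGSIZE;
 *		}
 *		nlmsg_end(skb, nlh);
 *		return netlink_unicast(my_sk, skb, NETLINK_CB(in_skb).portid,
 *				       MSG_DONTWAIT);
 *	}
 */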
int netlink_has_listeners(struct sock *sk, unsigned int group)
{
	int res = 0;
	struct listeners *listeners;

	BUG_ON(!netlink_is_kernel(sk));

	rcu_read_lock();
	listeners = rcu_dereference(nl_table[sk->sk_protocol].listeners);

	if (listeners && group - 1 < nl_table[sk->sk_protocol].groups)
		res = test_bit(group - 1, listeners->masks);

	rcu_read_unlock();

	return res;
}
EXPORT_SYMBOL_GPL(netlink_has_listeners);
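
/*
 * Example (illustrative sketch): broadcasters commonly gate expensive
 * message construction on netlink_has_listeners() and only then call
 * netlink_broadcast(). MY_GROUP and my_build_msg() are hypothetical.
 *
 *	static void my_notify(struct sock *my_sk, gfp_t gfp)
 *	{
 *		struct sk_buff *skb;
 *
 *		if (!netlink_has_listeners(my_sk, MY_GROUP))
 *			return;	// nobody subscribed, skip the work
 *		skb = my_build_msg(gfp);
 *		if (skb)
 *			netlink_broadcast(my_sk, skb, 0, MY_GROUP, gfp);
 *	}
 */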
static int netlink_broadcast_deliver(struct sock *sk, struct sk_buff *skb)
{
	struct netlink_sock *nlk = nlk_sk(sk);

	if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf &&
	    !test_bit(NETLINK_CONGESTED, &nlk->state)) {
		netlink_skb_set_owner_r(skb, sk);
		__netlink_sendskb(sk, skb);
		return atomic_read(&sk->sk_rmem_alloc) > (sk->sk_rcvbuf >> 1);
	}
	return -1;
}
struct netlink_broadcast_data {
	struct sock *exclude_sk;
	struct net *net;
	u32 portid;
	u32 group;
	int failure;
	int delivery_failure;
	int congested;
	int delivered;
	gfp_t allocation;
	struct sk_buff *skb, *skb2;
	int (*tx_filter)(struct sock *dsk, struct sk_buff *skb, void *data);
	void *tx_data;
};
static int do_one_broadcast(struct sock *sk,
			    struct netlink_broadcast_data *p)
{
	struct netlink_sock *nlk = nlk_sk(sk);
	int val;

	if (p->exclude_sk == sk)
		goto out;

	if (nlk->portid == p->portid || p->group - 1 >= nlk->ngroups ||
	    !test_bit(p->group - 1, nlk->groups))
		goto out;

	if (!net_eq(sock_net(sk), p->net))
		goto out;

	if (p->failure) {
		netlink_overrun(sk);
		goto out;
	}

	sock_hold(sk);
	if (p->skb2 == NULL) {
		if (skb_shared(p->skb)) {
			p->skb2 = skb_clone(p->skb, p->allocation);
		} else {
			p->skb2 = skb_get(p->skb);
			/*
			 * skb ownership may have been set when
			 * delivered to a previous socket.
			 */
			skb_orphan(p->skb2);
		}
	}
	if (p->skb2 == NULL) {
		netlink_overrun(sk);
		/* Clone failed. Notify ALL listeners. */
		p->failure = 1;
		if (nlk->flags & NETLINK_BROADCAST_SEND_ERROR)
			p->delivery_failure = 1;
	} else if (p->tx_filter && p->tx_filter(sk, p->skb2, p->tx_data)) {
		kfree_skb(p->skb2);
		p->skb2 = NULL;
	} else if (sk_filter(sk, p->skb2)) {
		kfree_skb(p->skb2);
		p->skb2 = NULL;
	} else if ((val = netlink_broadcast_deliver(sk, p->skb2)) < 0) {
		netlink_overrun(sk);
		if (nlk->flags & NETLINK_BROADCAST_SEND_ERROR)
			p->delivery_failure = 1;
	} else {
		p->congested |= val;
		p->delivered = 1;
		p->skb2 = NULL;
	}
	sock_put(sk);

out:
	return 0;
}
int netlink_broadcast_filtered(struct sock *ssk, struct sk_buff *skb, u32 portid,
	u32 group, gfp_t allocation,
	int (*filter)(struct sock *dsk, struct sk_buff *skb, void *data),
	void *filter_data)
{
	struct net *net = sock_net(ssk);
	struct netlink_broadcast_data info;
	struct sock *sk;

	skb = netlink_trim(skb, allocation);

	info.exclude_sk = ssk;
	info.net = net;
	info.portid = portid;
	info.group = group;
	info.failure = 0;
	info.delivery_failure = 0;
	info.congested = 0;
	info.delivered = 0;
	info.allocation = allocation;
	info.skb = skb;
	info.skb2 = NULL;
	info.tx_filter = filter;
	info.tx_data = filter_data;

	/* While we sleep in clone, do not allow to change socket list */

	netlink_lock_table();

	sk_for_each_bound(sk, &nl_table[ssk->sk_protocol].mc_list)
		do_one_broadcast(sk, &info);

	consume_skb(skb);

	netlink_unlock_table();

	if (info.delivery_failure) {
		kfree_skb(info.skb2);
		return -ENOBUFS;
	}
	consume_skb(info.skb2);

	if (info.delivered) {
		if (info.congested && (allocation & __GFP_WAIT))
			yield();
		return 0;
	}
	return -ESRCH;
}
EXPORT_SYMBOL(netlink_broadcast_filtered);

int netlink_broadcast(struct sock *ssk, struct sk_buff *skb, u32 portid,
		      u32 group, gfp_t allocation)
{
	return netlink_broadcast_filtered(ssk, skb, portid, group, allocation,
		NULL, NULL);
}
EXPORT_SYMBOL(netlink_broadcast);
struct netlink_set_err_data {
	struct sock *exclude_sk;
	u32 portid;
	u32 group;
	int code;
};

static int do_one_set_err(struct sock *sk, struct netlink_set_err_data *p)
{
	struct netlink_sock *nlk = nlk_sk(sk);
	int ret = 0;

	if (sk == p->exclude_sk)
		goto out;

	if (!net_eq(sock_net(sk), sock_net(p->exclude_sk)))
		goto out;

	if (nlk->portid == p->portid || p->group - 1 >= nlk->ngroups ||
	    !test_bit(p->group - 1, nlk->groups))
		goto out;

	if (p->code == ENOBUFS && nlk->flags & NETLINK_RECV_NO_ENOBUFS) {
		ret = 1;
		goto out;
	}

	sk->sk_err = p->code;
	sk->sk_error_report(sk);
out:
	return ret;
}
/**
 * netlink_set_err - report error to broadcast listeners
 * @ssk: the kernel netlink socket, as returned by netlink_kernel_create()
 * @portid: the PORTID of a process that we want to skip (if any)
 * @group: the broadcast group that will notice the error
 * @code: error code, must be negative (as usual in kernelspace)
 *
 * This function returns the number of broadcast listeners that have set the
 * NETLINK_RECV_NO_ENOBUFS socket option.
 */
int netlink_set_err(struct sock *ssk, u32 portid, u32 group, int code)
{
	struct netlink_set_err_data info;
	struct sock *sk;
	int ret = 0;

	info.exclude_sk = ssk;
	info.portid = portid;
	info.group = group;
	/* sk->sk_err wants a positive error value */
	info.code = -code;

	read_lock(&nl_table_lock);

	sk_for_each_bound(sk, &nl_table[ssk->sk_protocol].mc_list)
		ret += do_one_set_err(sk, &info);

	read_unlock(&nl_table_lock);
	return ret;
}
EXPORT_SYMBOL(netlink_set_err);
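
/*
 * Example (illustrative sketch): a subsystem that had to drop a multicast
 * event on allocation failure can ask all listeners of a (hypothetical)
 * MY_GROUP group to resynchronize:
 *
 *	netlink_set_err(my_sk, 0, MY_GROUP, -ENOBUFS);
 *
 * Listeners that did not set NETLINK_NO_ENOBUFS then see ENOBUFS on their
 * next socket operation.
 */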
/* must be called with netlink table grabbed */
static void netlink_update_socket_mc(struct netlink_sock *nlk,
				     unsigned int group,
				     int is_new)
{
	int old, new = !!is_new, subscriptions;

	old = test_bit(group - 1, nlk->groups);
	subscriptions = nlk->subscriptions - old + new;
	if (new)
		__set_bit(group - 1, nlk->groups);
	else
		__clear_bit(group - 1, nlk->groups);
	netlink_update_subscriptions(&nlk->sk, subscriptions);
	netlink_update_listeners(&nlk->sk);
}
static int netlink_setsockopt(struct socket *sock, int level, int optname,
			      char __user *optval, unsigned int optlen)
{
	struct sock *sk = sock->sk;
	struct netlink_sock *nlk = nlk_sk(sk);
	unsigned int val = 0;
	int err;

	if (level != SOL_NETLINK)
		return -ENOPROTOOPT;

	if (optname != NETLINK_RX_RING && optname != NETLINK_TX_RING &&
	    optlen >= sizeof(int) &&
	    get_user(val, (unsigned int __user *)optval))
		return -EFAULT;

	switch (optname) {
	case NETLINK_PKTINFO:
		if (val)
			nlk->flags |= NETLINK_RECV_PKTINFO;
		else
			nlk->flags &= ~NETLINK_RECV_PKTINFO;
		err = 0;
		break;
	case NETLINK_ADD_MEMBERSHIP:
	case NETLINK_DROP_MEMBERSHIP: {
		if (!netlink_capable(sock, NL_CFG_F_NONROOT_RECV))
			return -EPERM;
		err = netlink_realloc_groups(sk);
		if (err)
			return err;
		if (!val || val - 1 >= nlk->ngroups)
			return -EINVAL;
		netlink_table_grab();
		netlink_update_socket_mc(nlk, val,
					 optname == NETLINK_ADD_MEMBERSHIP);
		netlink_table_ungrab();

		if (nlk->netlink_bind)
			nlk->netlink_bind(val);

		err = 0;
		break;
	}
	case NETLINK_BROADCAST_ERROR:
		if (val)
			nlk->flags |= NETLINK_BROADCAST_SEND_ERROR;
		else
			nlk->flags &= ~NETLINK_BROADCAST_SEND_ERROR;
		err = 0;
		break;
	case NETLINK_NO_ENOBUFS:
		if (val) {
			nlk->flags |= NETLINK_RECV_NO_ENOBUFS;
			clear_bit(NETLINK_CONGESTED, &nlk->state);
			wake_up_interruptible(&nlk->wait);
		} else {
			nlk->flags &= ~NETLINK_RECV_NO_ENOBUFS;
		}
		err = 0;
		break;
#ifdef CONFIG_NETLINK_MMAP
	case NETLINK_RX_RING:
	case NETLINK_TX_RING: {
		struct nl_mmap_req req;

		/* Rings might consume more memory than queue limits, require
		 * CAP_NET_ADMIN.
		 */
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		if (optlen < sizeof(req))
			return -EINVAL;
		if (copy_from_user(&req, optval, sizeof(req)))
			return -EFAULT;
		err = netlink_set_ring(sk, &req, false,
				       optname == NETLINK_TX_RING);
		break;
	}
#endif /* CONFIG_NETLINK_MMAP */
	default:
		err = -ENOPROTOOPT;
	}
	return err;
}
static int netlink_getsockopt(struct socket *sock, int level, int optname,
			      char __user *optval, int __user *optlen)
{
	struct sock *sk = sock->sk;
	struct netlink_sock *nlk = nlk_sk(sk);
	int len, val, err;

	if (level != SOL_NETLINK)
		return -ENOPROTOOPT;

	if (get_user(len, optlen))
		return -EFAULT;
	if (len < 0)
		return -EINVAL;

	switch (optname) {
	case NETLINK_PKTINFO:
		if (len < sizeof(int))
			return -EINVAL;
		len = sizeof(int);
		val = nlk->flags & NETLINK_RECV_PKTINFO ? 1 : 0;
		if (put_user(len, optlen) ||
		    put_user(val, optval))
			return -EFAULT;
		err = 0;
		break;
	case NETLINK_BROADCAST_ERROR:
		if (len < sizeof(int))
			return -EINVAL;
		len = sizeof(int);
		val = nlk->flags & NETLINK_BROADCAST_SEND_ERROR ? 1 : 0;
		if (put_user(len, optlen) ||
		    put_user(val, optval))
			return -EFAULT;
		err = 0;
		break;
	case NETLINK_NO_ENOBUFS:
		if (len < sizeof(int))
			return -EINVAL;
		len = sizeof(int);
		val = nlk->flags & NETLINK_RECV_NO_ENOBUFS ? 1 : 0;
		if (put_user(len, optlen) ||
		    put_user(val, optval))
			return -EFAULT;
		err = 0;
		break;
	default:
		err = -ENOPROTOOPT;
	}
	return err;
}
static void netlink_cmsg_recv_pktinfo(struct msghdr *msg, struct sk_buff *skb)
{
	struct nl_pktinfo info;

	info.group = NETLINK_CB(skb).dst_group;
	put_cmsg(msg, SOL_NETLINK, NETLINK_PKTINFO, sizeof(info), &info);
}
static int netlink_sendmsg(struct kiocb *kiocb, struct socket *sock,
			   struct msghdr *msg, size_t len)
{
	struct sock_iocb *siocb = kiocb_to_siocb(kiocb);
	struct sock *sk = sock->sk;
	struct netlink_sock *nlk = nlk_sk(sk);
	struct sockaddr_nl *addr = msg->msg_name;
	u32 dst_portid;
	u32 dst_group;
	struct sk_buff *skb;
	int err;
	struct scm_cookie scm;

	if (msg->msg_flags&MSG_OOB)
		return -EOPNOTSUPP;

	if (NULL == siocb->scm)
		siocb->scm = &scm;

	err = scm_send(sock, msg, siocb->scm, true);
	if (err < 0)
		return err;

	if (msg->msg_namelen) {
		err = -EINVAL;
		if (addr->nl_family != AF_NETLINK)
			goto out;
		dst_portid = addr->nl_pid;
		dst_group = ffs(addr->nl_groups);
		err = -EPERM;
		if ((dst_group || dst_portid) &&
		    !netlink_capable(sock, NL_CFG_F_NONROOT_SEND))
			goto out;
	} else {
		dst_portid = nlk->dst_portid;
		dst_group = nlk->dst_group;
	}

	if (!nlk->portid) {
		err = netlink_autobind(sock);
		if (err)
			goto out;
	}

	if (netlink_tx_is_mmaped(sk) &&
	    msg->msg_iov->iov_base == NULL) {
		err = netlink_mmap_sendmsg(sk, msg, dst_portid, dst_group,
					   siocb);
		goto out;
	}

	err = -EMSGSIZE;
	if (len > sk->sk_sndbuf - 32)
		goto out;
	err = -ENOBUFS;
	skb = alloc_skb(len, GFP_KERNEL);
	if (skb == NULL)
		goto out;

	NETLINK_CB(skb).portid	= nlk->portid;
	NETLINK_CB(skb).dst_group = dst_group;
	NETLINK_CB(skb).creds	= siocb->scm->creds;

	err = -EFAULT;
	if (memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len)) {
		kfree_skb(skb);
		goto out;
	}

	err = security_netlink_send(sk, skb);
	if (err) {
		kfree_skb(skb);
		goto out;
	}

	if (dst_group) {
		atomic_inc(&skb->users);
		netlink_broadcast(sk, skb, dst_portid, dst_group, GFP_KERNEL);
	}
	err = netlink_unicast(sk, skb, dst_portid, msg->msg_flags&MSG_DONTWAIT);

out:
	scm_destroy(siocb->scm);
	return err;
}
static int netlink_recvmsg(struct kiocb *kiocb, struct socket *sock,
			   struct msghdr *msg, size_t len,
			   int flags)
{
	struct sock_iocb *siocb = kiocb_to_siocb(kiocb);
	struct scm_cookie scm;
	struct sock *sk = sock->sk;
	struct netlink_sock *nlk = nlk_sk(sk);
	int noblock = flags&MSG_DONTWAIT;
	size_t copied;
	struct sk_buff *skb, *data_skb;
	int err, ret;

	if (flags&MSG_OOB)
		return -EOPNOTSUPP;

	copied = 0;

	skb = skb_recv_datagram(sk, flags, noblock, &err);
	if (skb == NULL)
		goto out;

	data_skb = skb;

#ifdef CONFIG_COMPAT_NETLINK_MESSAGES
	if (unlikely(skb_shinfo(skb)->frag_list)) {
		/*
		 * If this skb has a frag_list, then here that means that we
		 * will have to use the frag_list skb's data for compat tasks
		 * and the regular skb's data for normal (non-compat) tasks.
		 *
		 * If we need to send the compat skb, assign it to the
		 * 'data_skb' variable so that it will be used below for data
		 * copying. We keep 'skb' for everything else, including
		 * freeing both later.
		 */
		if (flags & MSG_CMSG_COMPAT)
			data_skb = skb_shinfo(skb)->frag_list;
	}
#endif

	msg->msg_namelen = 0;

	copied = data_skb->len;
	if (len < copied) {
		msg->msg_flags |= MSG_TRUNC;
		copied = len;
	}

	skb_reset_transport_header(data_skb);
	err = skb_copy_datagram_iovec(data_skb, 0, msg->msg_iov, copied);

	if (msg->msg_name) {
		struct sockaddr_nl *addr = (struct sockaddr_nl *)msg->msg_name;
		addr->nl_family = AF_NETLINK;
		addr->nl_pad    = 0;
		addr->nl_pid	= NETLINK_CB(skb).portid;
		addr->nl_groups	= netlink_group_mask(NETLINK_CB(skb).dst_group);
		msg->msg_namelen = sizeof(*addr);
	}

	if (nlk->flags & NETLINK_RECV_PKTINFO)
		netlink_cmsg_recv_pktinfo(msg, skb);

	if (NULL == siocb->scm) {
		memset(&scm, 0, sizeof(scm));
		siocb->scm = &scm;
	}
	siocb->scm->creds = *NETLINK_CREDS(skb);
	if (flags & MSG_TRUNC)
		copied = data_skb->len;

	skb_free_datagram(sk, skb);

	if (nlk->cb && atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf / 2) {
		ret = netlink_dump(sk);
		if (ret) {
			sk->sk_err = ret;
			sk->sk_error_report(sk);
		}
	}

	scm_recv(sock, msg, siocb->scm, flags);
out:
	netlink_rcv_wake(sk);
	return err ? : copied;
}
static void netlink_data_ready(struct sock *sk, int len)
{
	BUG();
}
/*
 *	We export these functions to other modules. They provide a
 *	complete set of kernel non-blocking support for message
 *	queueing.
 */

struct sock *
__netlink_kernel_create(struct net *net, int unit, struct module *module,
			struct netlink_kernel_cfg *cfg)
{
	struct socket *sock;
	struct sock *sk;
	struct netlink_sock *nlk;
	struct listeners *listeners = NULL;
	struct mutex *cb_mutex = cfg ? cfg->cb_mutex : NULL;
	unsigned int groups;

	BUG_ON(!nl_table);

	if (unit < 0 || unit >= MAX_LINKS)
		return NULL;

	if (sock_create_lite(PF_NETLINK, SOCK_DGRAM, unit, &sock))
		return NULL;

	/*
	 * We have to just have a reference on the net from sk, but don't
	 * get_net it. Besides, we cannot get and then put the net here.
	 * So we create one inside init_net and then move it to net.
	 */

	if (__netlink_create(&init_net, sock, cb_mutex, unit) < 0)
		goto out_sock_release_nosk;

	sk = sock->sk;
	sk_change_net(sk, net);

	if (!cfg || cfg->groups < 32)
		groups = 32;
	else
		groups = cfg->groups;

	listeners = kzalloc(sizeof(*listeners) + NLGRPSZ(groups), GFP_KERNEL);
	if (!listeners)
		goto out_sock_release;

	sk->sk_data_ready = netlink_data_ready;
	if (cfg && cfg->input)
		nlk_sk(sk)->netlink_rcv = cfg->input;

	if (netlink_insert(sk, net, 0))
		goto out_sock_release;

	nlk = nlk_sk(sk);
	nlk->flags |= NETLINK_KERNEL_SOCKET;

	netlink_table_grab();
	if (!nl_table[unit].registered) {
		nl_table[unit].groups = groups;
		rcu_assign_pointer(nl_table[unit].listeners, listeners);
		nl_table[unit].cb_mutex = cb_mutex;
		nl_table[unit].module = module;
		if (cfg) {
			nl_table[unit].bind = cfg->bind;
			nl_table[unit].flags = cfg->flags;
		}
		nl_table[unit].registered = 1;
	} else {
		kfree(listeners);
		nl_table[unit].registered++;
	}
	netlink_table_ungrab();
	return sk;

out_sock_release:
	kfree(listeners);
	netlink_kernel_release(sk);
	return NULL;

out_sock_release_nosk:
	sock_release(sock);
	return NULL;
}
EXPORT_SYMBOL(__netlink_kernel_create);
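
/*
 * Example (illustrative sketch): most callers go through the
 * netlink_kernel_create() wrapper from <linux/netlink.h>, which passes
 * THIS_MODULE for them. NETLINK_USERSOCK merely stands in for a real
 * protocol number here; my_rcv() is hypothetical.
 *
 *	static void my_rcv(struct sk_buff *skb)
 *	{
 *		// called for every message queued to the kernel socket
 *	}
 *
 *	static struct sock *my_create(struct net *net)
 *	{
 *		struct netlink_kernel_cfg cfg = {
 *			.groups	= 32,
 *			.input	= my_rcv,
 *		};
 *
 *		return netlink_kernel_create(net, NETLINK_USERSOCK, &cfg);
 *	}
 */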
void
netlink_kernel_release(struct sock *sk)
{
	sk_release_kernel(sk);
}
EXPORT_SYMBOL(netlink_kernel_release);
int __netlink_change_ngroups(struct sock *sk, unsigned int groups)
{
	struct listeners *new, *old;
	struct netlink_table *tbl = &nl_table[sk->sk_protocol];

	if (groups < 32)
		groups = 32;

	if (NLGRPSZ(tbl->groups) < NLGRPSZ(groups)) {
		new = kzalloc(sizeof(*new) + NLGRPSZ(groups), GFP_ATOMIC);
		if (!new)
			return -ENOMEM;
		old = nl_deref_protected(tbl->listeners);
		memcpy(new->masks, old->masks, NLGRPSZ(tbl->groups));
		rcu_assign_pointer(tbl->listeners, new);

		kfree_rcu(old, rcu);
	}
	tbl->groups = groups;

	return 0;
}
/**
 * netlink_change_ngroups - change number of multicast groups
 *
 * This changes the number of multicast groups that are available
 * on a certain netlink family. Note that it is not possible to
 * change the number of groups to below 32. Also note that it does
 * not implicitly call netlink_clear_multicast_users() when the
 * number of groups is reduced.
 *
 * @sk: The kernel netlink socket, as returned by netlink_kernel_create().
 * @groups: The new number of groups.
 */
int netlink_change_ngroups(struct sock *sk, unsigned int groups)
{
	int err;

	netlink_table_grab();
	err = __netlink_change_ngroups(sk, groups);
	netlink_table_ungrab();

	return err;
}
void __netlink_clear_multicast_users(struct sock *ksk, unsigned int group)
{
	struct sock *sk;
	struct netlink_table *tbl = &nl_table[ksk->sk_protocol];

	sk_for_each_bound(sk, &tbl->mc_list)
		netlink_update_socket_mc(nlk_sk(sk), group, 0);
}

/**
 * netlink_clear_multicast_users - kick off multicast listeners
 *
 * This function removes all listeners from the given group.
 * @ksk: The kernel netlink socket, as returned by
 *	netlink_kernel_create().
 * @group: The multicast group to clear.
 */
void netlink_clear_multicast_users(struct sock *ksk, unsigned int group)
{
	netlink_table_grab();
	__netlink_clear_multicast_users(ksk, group);
	netlink_table_ungrab();
}
struct nlmsghdr *
__nlmsg_put(struct sk_buff *skb, u32 portid, u32 seq, int type, int len, int flags)
{
	struct nlmsghdr *nlh;
	int size = nlmsg_msg_size(len);

	nlh = (struct nlmsghdr *)skb_put(skb, NLMSG_ALIGN(size));
	nlh->nlmsg_type = type;
	nlh->nlmsg_len = size;
	nlh->nlmsg_flags = flags;
	nlh->nlmsg_pid = portid;
	nlh->nlmsg_seq = seq;
	if (!__builtin_constant_p(size) || NLMSG_ALIGN(size) - size != 0)
		memset(nlmsg_data(nlh) + len, 0, NLMSG_ALIGN(size) - size);
	return nlh;
}
EXPORT_SYMBOL(__nlmsg_put);
/*
 * It looks a bit ugly.
 * It would be better to create kernel thread.
 */

static int netlink_dump(struct sock *sk)
{
	struct netlink_sock *nlk = nlk_sk(sk);
	struct netlink_callback *cb;
	struct sk_buff *skb = NULL;
	struct nlmsghdr *nlh;
	int len, err = -ENOBUFS;
	int alloc_size;

	mutex_lock(nlk->cb_mutex);

	cb = nlk->cb;
	if (cb == NULL) {
		err = -EINVAL;
		goto errout_skb;
	}

	alloc_size = max_t(int, cb->min_dump_alloc, NLMSG_GOODSIZE);

	skb = sock_rmalloc(sk, alloc_size, 0, GFP_KERNEL);
	if (!skb)
		goto errout_skb;

	len = cb->dump(skb, cb);

	if (len > 0) {
		mutex_unlock(nlk->cb_mutex);

		if (sk_filter(sk, skb))
			kfree_skb(skb);
		else
			__netlink_sendskb(sk, skb);
		return 0;
	}

	nlh = nlmsg_put_answer(skb, cb, NLMSG_DONE, sizeof(len), NLM_F_MULTI);
	if (!nlh)
		goto errout_skb;

	nl_dump_check_consistent(cb, nlh);

	memcpy(nlmsg_data(nlh), &len, sizeof(len));

	if (sk_filter(sk, skb))
		kfree_skb(skb);
	else
		__netlink_sendskb(sk, skb);

	if (cb->done)
		cb->done(cb);
	nlk->cb = NULL;
	mutex_unlock(nlk->cb_mutex);

	module_put(cb->module);
	netlink_consume_callback(cb);
	return 0;

errout_skb:
	mutex_unlock(nlk->cb_mutex);
	kfree_skb(skb);
	return err;
}
int __netlink_dump_start(struct sock *ssk, struct sk_buff *skb,
			 const struct nlmsghdr *nlh,
			 struct netlink_dump_control *control)
{
	struct netlink_callback *cb;
	struct sock *sk;
	struct netlink_sock *nlk;
	int ret;

	cb = kzalloc(sizeof(*cb), GFP_KERNEL);
	if (cb == NULL)
		return -ENOBUFS;

	cb->dump = control->dump;
	cb->done = control->done;
	cb->nlh = nlh;
	cb->data = control->data;
	cb->module = control->module;
	cb->min_dump_alloc = control->min_dump_alloc;
	atomic_inc(&skb->users);
	cb->skb = skb;

	sk = netlink_lookup(sock_net(ssk), ssk->sk_protocol, NETLINK_CB(skb).portid);
	if (sk == NULL) {
		netlink_destroy_callback(cb);
		return -ECONNREFUSED;
	}
	nlk = nlk_sk(sk);

	mutex_lock(nlk->cb_mutex);
	/* A dump is in progress... */
	if (nlk->cb) {
		mutex_unlock(nlk->cb_mutex);
		netlink_destroy_callback(cb);
		ret = -EBUSY;
		goto out;
	}
	/* add reference of module which cb->dump belongs to */
	if (!try_module_get(cb->module)) {
		mutex_unlock(nlk->cb_mutex);
		netlink_destroy_callback(cb);
		ret = -EPROTONOSUPPORT;
		goto out;
	}

	nlk->cb = cb;
	mutex_unlock(nlk->cb_mutex);

	ret = netlink_dump(sk);
out:
	sock_put(sk);

	if (ret)
		return ret;

	/* We successfully started a dump, by returning -EINTR we
	 * signal not to send ACK even if it was requested.
	 */
	return -EINTR;
}
EXPORT_SYMBOL(__netlink_dump_start);
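
/*
 * Example (illustrative sketch): a request handler typically starts a dump
 * through the netlink_dump_start() wrapper from <linux/netlink.h>; the
 * my_dump()/my_done() callbacks, which fill one skb per invocation, are
 * hypothetical.
 *
 *	static int my_get(struct sk_buff *skb, struct nlmsghdr *nlh)
 *	{
 *		if (nlh->nlmsg_flags & NLM_F_DUMP) {
 *			struct netlink_dump_control c = {
 *				.dump = my_dump,
 *				.done = my_done,
 *			};
 *			return netlink_dump_start(skb->sk, skb, nlh, &c);
 *		}
 *		return -EOPNOTSUPP;
 *	}
 */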
void netlink_ack(struct sk_buff *in_skb, struct nlmsghdr *nlh, int err)
{
	struct sk_buff *skb;
	struct nlmsghdr *rep;
	struct nlmsgerr *errmsg;
	size_t payload = sizeof(*errmsg);
	struct sock *sk;

	/* Error messages get the original request appended. */
	if (err)
		payload += nlmsg_len(nlh);

	skb = nlmsg_new(payload, GFP_KERNEL);
	if (!skb) {
		sk = netlink_lookup(sock_net(in_skb->sk),
				    in_skb->sk->sk_protocol,
				    NETLINK_CB(in_skb).portid);
		if (sk) {
			sk->sk_err = ENOBUFS;
			sk->sk_error_report(sk);
			sock_put(sk);
		}
		return;
	}

	rep = __nlmsg_put(skb, NETLINK_CB(in_skb).portid, nlh->nlmsg_seq,
			  NLMSG_ERROR, payload, 0);
	errmsg = nlmsg_data(rep);
	errmsg->error = err;
	memcpy(&errmsg->msg, nlh, err ? nlh->nlmsg_len : sizeof(*nlh));
	netlink_unicast(in_skb->sk, skb, NETLINK_CB(in_skb).portid, MSG_DONTWAIT);
}
EXPORT_SYMBOL(netlink_ack);
int netlink_rcv_skb(struct sk_buff *skb, int (*cb)(struct sk_buff *,
						   struct nlmsghdr *))
{
	struct nlmsghdr *nlh;
	int err;

	while (skb->len >= nlmsg_total_size(0)) {
		int msglen;

		nlh = nlmsg_hdr(skb);
		err = 0;

		if (nlh->nlmsg_len < NLMSG_HDRLEN || skb->len < nlh->nlmsg_len)
			return 0;

		/* Only requests are handled by the kernel */
		if (!(nlh->nlmsg_flags & NLM_F_REQUEST))
			goto ack;

		/* Skip control messages */
		if (nlh->nlmsg_type < NLMSG_MIN_TYPE)
			goto ack;

		err = cb(skb, nlh);
		if (err == -EINTR)
			goto skip;

ack:
		if (nlh->nlmsg_flags & NLM_F_ACK || err)
			netlink_ack(skb, nlh, err);

skip:
		msglen = NLMSG_ALIGN(nlh->nlmsg_len);
		if (msglen > skb->len)
			msglen = skb->len;
		skb_pull(skb, msglen);
	}

	return 0;
}
EXPORT_SYMBOL(netlink_rcv_skb);
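
/*
 * Example (illustrative sketch): the usual pattern for a kernel socket's
 * input callback is to let netlink_rcv_skb() walk the queued messages and
 * invoke a per-message handler; my_rcv_msg() is hypothetical.
 *
 *	static int my_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
 *	{
 *		// dispatch on nlh->nlmsg_type and return 0 or -errno;
 *		// netlink_rcv_skb() sends the ACK or error when needed
 *		return 0;
 *	}
 *
 *	static void my_input(struct sk_buff *skb)
 *	{
 *		netlink_rcv_skb(skb, &my_rcv_msg);
 *	}
 */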
/**
 * nlmsg_notify - send a notification netlink message
 * @sk: netlink socket to use
 * @skb: notification message
 * @portid: destination netlink portid for reports or 0
 * @group: destination multicast group or 0
 * @report: 1 to report back, 0 to disable
 * @flags: allocation flags
 */
int nlmsg_notify(struct sock *sk, struct sk_buff *skb, u32 portid,
		 unsigned int group, int report, gfp_t flags)
{
	int err = 0;

	if (group) {
		int exclude_portid = 0;

		if (report) {
			atomic_inc(&skb->users);
			exclude_portid = portid;
		}

		/* errors reported via destination sk->sk_err, but propagate
		 * delivery errors if NETLINK_BROADCAST_ERROR flag is set */
		err = nlmsg_multicast(sk, skb, exclude_portid, group, flags);
	}

	if (report) {
		int err2;

		err2 = nlmsg_unicast(sk, skb, portid);
		if (!err || err == -ESRCH)
			err = err2;
	}

	return err;
}
EXPORT_SYMBOL(nlmsg_notify);
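
/*
 * Example (illustrative sketch): rtnetlink-style notifiers use
 * nlmsg_notify() so a requester that set NLM_F_ECHO receives a unicast
 * copy besides the multicast; MY_GROUP and my_event() are hypothetical.
 *
 *	static int my_event(struct sock *my_sk, struct sk_buff *in_skb,
 *			    struct nlmsghdr *nlh, struct sk_buff *skb)
 *	{
 *		return nlmsg_notify(my_sk, skb, NETLINK_CB(in_skb).portid,
 *				    MY_GROUP, nlmsg_report(nlh), GFP_KERNEL);
 *	}
 */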
#ifdef CONFIG_PROC_FS
struct nl_seq_iter {
	struct seq_net_private p;
	int link;
	int hash_idx;
};

static struct sock *netlink_seq_socket_idx(struct seq_file *seq, loff_t pos)
{
	struct nl_seq_iter *iter = seq->private;
	int i, j;
	struct sock *s;
	loff_t off = 0;

	for (i = 0; i < MAX_LINKS; i++) {
		struct nl_portid_hash *hash = &nl_table[i].hash;

		for (j = 0; j <= hash->mask; j++) {
			sk_for_each(s, &hash->table[j]) {
				if (sock_net(s) != seq_file_net(seq))
					continue;
				if (off == pos) {
					iter->link = i;
					iter->hash_idx = j;
					return s;
				}
				++off;
			}
		}
	}
	return NULL;
}
static void *netlink_seq_start(struct seq_file *seq, loff_t *pos)
	__acquires(nl_table_lock)
{
	read_lock(&nl_table_lock);
	return *pos ? netlink_seq_socket_idx(seq, *pos - 1) : SEQ_START_TOKEN;
}

static void *netlink_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct sock *s;
	struct nl_seq_iter *iter;
	int i, j;

	++*pos;

	if (v == SEQ_START_TOKEN)
		return netlink_seq_socket_idx(seq, 0);

	iter = seq->private;
	s = v;
	do {
		s = sk_next(s);
	} while (s && sock_net(s) != seq_file_net(seq));
	if (s)
		return s;

	i = iter->link;
	j = iter->hash_idx + 1;

	do {
		struct nl_portid_hash *hash = &nl_table[i].hash;

		for (; j <= hash->mask; j++) {
			s = sk_head(&hash->table[j]);
			while (s && sock_net(s) != seq_file_net(seq))
				s = sk_next(s);
			if (s) {
				iter->link = i;
				iter->hash_idx = j;
				return s;
			}
		}

		j = 0;
	} while (++i < MAX_LINKS);

	return NULL;
}

static void netlink_seq_stop(struct seq_file *seq, void *v)
	__releases(nl_table_lock)
{
	read_unlock(&nl_table_lock);
}
static int netlink_seq_show(struct seq_file *seq, void *v)
{
	if (v == SEQ_START_TOKEN) {
		seq_puts(seq,
			 "sk       Eth Pid    Groups   "
			 "Rmem     Wmem     Dump     Locks     Drops     Inode\n");
	} else {
		struct sock *s = v;
		struct netlink_sock *nlk = nlk_sk(s);

		seq_printf(seq, "%pK %-3d %-6u %08x %-8d %-8d %pK %-8d %-8d %-8lu\n",
			   s,
			   s->sk_protocol,
			   nlk->portid,
			   nlk->groups ? (u32)nlk->groups[0] : 0,
			   sk_rmem_alloc_get(s),
			   sk_wmem_alloc_get(s),
			   nlk->cb,
			   atomic_read(&s->sk_refcnt),
			   atomic_read(&s->sk_drops),
			   sock_i_ino(s)
			);
	}
	return 0;
}
static const struct seq_operations netlink_seq_ops = {
	.start  = netlink_seq_start,
	.next   = netlink_seq_next,
	.stop   = netlink_seq_stop,
	.show   = netlink_seq_show,
};

static int netlink_seq_open(struct inode *inode, struct file *file)
{
	return seq_open_net(inode, file, &netlink_seq_ops,
				sizeof(struct nl_seq_iter));
}

static const struct file_operations netlink_seq_fops = {
	.owner		= THIS_MODULE,
	.open		= netlink_seq_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release_net,
};

#endif
int netlink_register_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_register(&netlink_chain, nb);
}
EXPORT_SYMBOL(netlink_register_notifier);

int netlink_unregister_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_unregister(&netlink_chain, nb);
}
EXPORT_SYMBOL(netlink_unregister_notifier);
static const struct proto_ops netlink_ops = {
	.family =	PF_NETLINK,
	.owner =	THIS_MODULE,
	.release =	netlink_release,
	.bind =		netlink_bind,
	.connect =	netlink_connect,
	.socketpair =	sock_no_socketpair,
	.accept =	sock_no_accept,
	.getname =	netlink_getname,
	.poll =		netlink_poll,
	.ioctl =	sock_no_ioctl,
	.listen =	sock_no_listen,
	.shutdown =	sock_no_shutdown,
	.setsockopt =	netlink_setsockopt,
	.getsockopt =	netlink_getsockopt,
	.sendmsg =	netlink_sendmsg,
	.recvmsg =	netlink_recvmsg,
	.mmap =		netlink_mmap,
	.sendpage =	sock_no_sendpage,
};

static const struct net_proto_family netlink_family_ops = {
	.family = PF_NETLINK,
	.create = netlink_create,
	.owner	= THIS_MODULE,	/* for consistency 8) */
};
static int __net_init netlink_net_init(struct net *net)
{
#ifdef CONFIG_PROC_FS
	if (!proc_create("netlink", 0, net->proc_net, &netlink_seq_fops))
		return -ENOMEM;
#endif
	return 0;
}

static void __net_exit netlink_net_exit(struct net *net)
{
#ifdef CONFIG_PROC_FS
	remove_proc_entry("netlink", net->proc_net);
#endif
}
static void __init netlink_add_usersock_entry(void)
{
	struct listeners *listeners;
	int groups = 32;

	listeners = kzalloc(sizeof(*listeners) + NLGRPSZ(groups), GFP_KERNEL);
	if (!listeners)
		panic("netlink_add_usersock_entry: Cannot allocate listeners\n");

	netlink_table_grab();

	nl_table[NETLINK_USERSOCK].groups = groups;
	rcu_assign_pointer(nl_table[NETLINK_USERSOCK].listeners, listeners);
	nl_table[NETLINK_USERSOCK].module = THIS_MODULE;
	nl_table[NETLINK_USERSOCK].registered = 1;
	nl_table[NETLINK_USERSOCK].flags = NL_CFG_F_NONROOT_SEND;

	netlink_table_ungrab();
}
static struct pernet_operations __net_initdata netlink_net_ops = {
	.init = netlink_net_init,
	.exit = netlink_net_exit,
};
static int __init netlink_proto_init(void)
{
	int i;
	unsigned long limit;
	unsigned int order;
	int err = proto_register(&netlink_proto, 0);

	if (err != 0)
		goto out;

	BUILD_BUG_ON(sizeof(struct netlink_skb_parms) > FIELD_SIZEOF(struct sk_buff, cb));

	nl_table = kcalloc(MAX_LINKS, sizeof(*nl_table), GFP_KERNEL);
	if (!nl_table)
		goto panic;

	if (totalram_pages >= (128 * 1024))
		limit = totalram_pages >> (21 - PAGE_SHIFT);
	else
		limit = totalram_pages >> (23 - PAGE_SHIFT);

	order = get_bitmask_order(limit) - 1 + PAGE_SHIFT;
	limit = (1UL << order) / sizeof(struct hlist_head);
	order = get_bitmask_order(min(limit, (unsigned long)UINT_MAX)) - 1;

	for (i = 0; i < MAX_LINKS; i++) {
		struct nl_portid_hash *hash = &nl_table[i].hash;

		hash->table = nl_portid_hash_zalloc(1 * sizeof(*hash->table));
		if (!hash->table) {
			while (i-- > 0)
				nl_portid_hash_free(nl_table[i].hash.table,
						 1 * sizeof(*hash->table));
			kfree(nl_table);
			goto panic;
		}
		hash->max_shift = order;
		hash->shift = 0;
		hash->mask = 0;
		hash->rehash_time = jiffies;
	}

	netlink_add_usersock_entry();

	sock_register(&netlink_family_ops);
	register_pernet_subsys(&netlink_net_ops);
	/* The netlink device handler may be needed early. */
	rtnetlink_init();
out:
	return err;
panic:
	panic("netlink_init: Cannot allocate nl_table\n");
}

core_initcall(netlink_proto_init);