 * NETLINK	Kernel-user communication protocol.
 *
 *		Authors:	Alan Cox <alan@lxorguk.ukuu.org.uk>
 *				Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
 *				Patrick McHardy <kaber@trash.net>
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 * Tue Jun 26 14:36:48 MEST 2001 Herbert "herp" Rosmanith
 *				 added netlink_proto_exit
 * Tue Jan 22 18:32:44 BRST 2002 Arnaldo C. de Melo <acme@conectiva.com.br>
 *				 use nlk_sk, as sk->protinfo is on a diet 8)
 * Fri Jul 22 19:51:12 MEST 2005 Harald Welte <laforge@gnumonks.org>
 *				- inc module use count of module that owns
 *				  the kernel socket in case userspace opens
 *				  socket of same protocol
 *				- remove all module support, since netlink is
 *				  mandatory if CONFIG_NET=y these days
#include <linux/module.h>
#include <linux/capability.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/stat.h>
#include <linux/socket.h>
#include <linux/fcntl.h>
#include <linux/termios.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/slab.h>
#include <asm/uaccess.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/rtnetlink.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/notifier.h>
#include <linux/security.h>
#include <linux/jhash.h>
#include <linux/jiffies.h>
#include <linux/random.h>
#include <linux/bitops.h>
#include <linux/types.h>
#include <linux/audit.h>
#include <linux/mutex.h>
#include <linux/vmalloc.h>
#include <linux/if_arp.h>
#include <linux/rhashtable.h>
#include <asm/cacheflush.h>
#include <linux/hash.h>
#include <net/net_namespace.h>
#include <net/netlink.h>
#include "af_netlink.h"
	unsigned long		masks[0];

#define NETLINK_CONGESTED	0x0

#define NETLINK_KERNEL_SOCKET	0x1
#define NETLINK_RECV_PKTINFO	0x2
#define NETLINK_BROADCAST_SEND_ERROR	0x4
#define NETLINK_RECV_NO_ENOBUFS	0x8

static inline int netlink_is_kernel(struct sock *sk)
	return nlk_sk(sk)->flags & NETLINK_KERNEL_SOCKET;

struct netlink_table *nl_table;
EXPORT_SYMBOL_GPL(nl_table);

static DECLARE_WAIT_QUEUE_HEAD(nl_table_wait);

static int netlink_dump(struct sock *sk);
static void netlink_skb_destructor(struct sk_buff *skb);

/* nl_table locking explained:
 * Lookup and traversal are protected with nl_sk_hash_lock or nl_table_lock
 * combined with an RCU read-side lock. Insertion and removal are protected
 * with nl_sk_hash_lock while using RCU list modification primitives and may
 * run in parallel to nl_table_lock protected lookups. Destruction of the
 * Netlink socket may only occur *after* nl_table_lock has been acquired
 * either during or after the socket has been removed from the list.
 */
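/* Illustrative sketch (not part of the original file): a reader-side
 * lookup under the scheme above takes the table lock shared and grabs a
 * socket reference before dropping it, as netlink_lookup() below does:
 *
 *	read_lock(&nl_table_lock);
 *	sk = __netlink_lookup(table, portid, net);
 *	if (sk)
 *		sock_hold(sk);
 *	read_unlock(&nl_table_lock);
 */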
DEFINE_RWLOCK(nl_table_lock);
EXPORT_SYMBOL_GPL(nl_table_lock);
static atomic_t nl_table_users = ATOMIC_INIT(0);

#define nl_deref_protected(X) rcu_dereference_protected(X, lockdep_is_held(&nl_table_lock))

/* Protects netlink socket hash table mutations */
DEFINE_MUTEX(nl_sk_hash_lock);
EXPORT_SYMBOL_GPL(nl_sk_hash_lock);

#ifdef CONFIG_PROVE_LOCKING
static int lockdep_nl_sk_hash_is_held(void *parent)
	return lockdep_is_held(&nl_sk_hash_lock) || lockdep_is_held(&nl_table_lock);

static ATOMIC_NOTIFIER_HEAD(netlink_chain);

static DEFINE_SPINLOCK(netlink_tap_lock);
static struct list_head netlink_tap_all __read_mostly;

static inline u32 netlink_group_mask(u32 group)
	return group ? 1 << (group - 1) : 0;

int netlink_add_tap(struct netlink_tap *nt)
	if (unlikely(nt->dev->type != ARPHRD_NETLINK))

	spin_lock(&netlink_tap_lock);
	list_add_rcu(&nt->list, &netlink_tap_all);
	spin_unlock(&netlink_tap_lock);

	__module_get(nt->module);
EXPORT_SYMBOL_GPL(netlink_add_tap);
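/* Usage sketch (illustrative, not part of this file): a monitoring
 * driver such as drivers/net/nlmon.c registers one tap per netdevice it
 * creates. The device must be of type ARPHRD_NETLINK, or the check in
 * netlink_add_tap() above rejects it:
 *
 *	static struct netlink_tap my_tap = {
 *		.module = THIS_MODULE,
 *	};
 *
 *	my_tap.dev = dev;		(a registered ARPHRD_NETLINK netdev)
 *	netlink_add_tap(&my_tap);	(start receiving cloned netlink skbs)
 *	...
 *	netlink_remove_tap(&my_tap);	(on teardown)
 *
 * my_tap and dev are placeholder names.
 */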
static int __netlink_remove_tap(struct netlink_tap *nt)
	struct netlink_tap *tmp;

	spin_lock(&netlink_tap_lock);

	list_for_each_entry(tmp, &netlink_tap_all, list) {
		list_del_rcu(&nt->list);

	pr_warn("__netlink_remove_tap: %p not found\n", nt);

	spin_unlock(&netlink_tap_lock);

	if (found && nt->module)
		module_put(nt->module);

	return found ? 0 : -ENODEV;

int netlink_remove_tap(struct netlink_tap *nt)
	ret = __netlink_remove_tap(nt);
EXPORT_SYMBOL_GPL(netlink_remove_tap);

static bool netlink_filter_tap(const struct sk_buff *skb)
	struct sock *sk = skb->sk;

	/* We take the more conservative approach and
	 * whitelist socket protocols that may pass.
	 */
	switch (sk->sk_protocol) {
	case NETLINK_USERSOCK:
	case NETLINK_SOCK_DIAG:
	case NETLINK_FIB_LOOKUP:
	case NETLINK_NETFILTER:
	case NETLINK_GENERIC:

static int __netlink_deliver_tap_skb(struct sk_buff *skb,
				     struct net_device *dev)
	struct sk_buff *nskb;
	struct sock *sk = skb->sk;

	nskb = skb_clone(skb, GFP_ATOMIC);
		nskb->protocol = htons((u16) sk->sk_protocol);
		nskb->pkt_type = netlink_is_kernel(sk) ?
				 PACKET_KERNEL : PACKET_USER;
		skb_reset_network_header(nskb);
		ret = dev_queue_xmit(nskb);
		if (unlikely(ret > 0))
			ret = net_xmit_errno(ret);

static void __netlink_deliver_tap(struct sk_buff *skb)
	struct netlink_tap *tmp;

	if (!netlink_filter_tap(skb))

	list_for_each_entry_rcu(tmp, &netlink_tap_all, list) {
		ret = __netlink_deliver_tap_skb(skb, tmp->dev);

static void netlink_deliver_tap(struct sk_buff *skb)
	if (unlikely(!list_empty(&netlink_tap_all)))
		__netlink_deliver_tap(skb);

static void netlink_deliver_tap_kernel(struct sock *dst, struct sock *src,
	if (!(netlink_is_kernel(dst) && netlink_is_kernel(src)))
		netlink_deliver_tap(skb);

static void netlink_overrun(struct sock *sk)
	struct netlink_sock *nlk = nlk_sk(sk);

	if (!(nlk->flags & NETLINK_RECV_NO_ENOBUFS)) {
		if (!test_and_set_bit(NETLINK_CONGESTED, &nlk_sk(sk)->state)) {
			sk->sk_err = ENOBUFS;
			sk->sk_error_report(sk);
	atomic_inc(&sk->sk_drops);

static void netlink_rcv_wake(struct sock *sk)
	struct netlink_sock *nlk = nlk_sk(sk);

	if (skb_queue_empty(&sk->sk_receive_queue))
		clear_bit(NETLINK_CONGESTED, &nlk->state);
	if (!test_bit(NETLINK_CONGESTED, &nlk->state))
		wake_up_interruptible(&nlk->wait);
#ifdef CONFIG_NETLINK_MMAP
static bool netlink_skb_is_mmaped(const struct sk_buff *skb)
	return NETLINK_CB(skb).flags & NETLINK_SKB_MMAPED;

static bool netlink_rx_is_mmaped(struct sock *sk)
	return nlk_sk(sk)->rx_ring.pg_vec != NULL;

static bool netlink_tx_is_mmaped(struct sock *sk)
	return nlk_sk(sk)->tx_ring.pg_vec != NULL;

static __pure struct page *pgvec_to_page(const void *addr)
	if (is_vmalloc_addr(addr))
		return vmalloc_to_page(addr);
	return virt_to_page(addr);

static void free_pg_vec(void **pg_vec, unsigned int order, unsigned int len)
	for (i = 0; i < len; i++) {
		if (pg_vec[i] != NULL) {
			if (is_vmalloc_addr(pg_vec[i]))
				free_pages((unsigned long)pg_vec[i], order);

static void *alloc_one_pg_vec_page(unsigned long order)
	gfp_t gfp_flags = GFP_KERNEL | __GFP_COMP | __GFP_ZERO |
			  __GFP_NOWARN | __GFP_NORETRY;

	buffer = (void *)__get_free_pages(gfp_flags, order);

	buffer = vzalloc((1 << order) * PAGE_SIZE);

	gfp_flags &= ~__GFP_NORETRY;
	return (void *)__get_free_pages(gfp_flags, order);

static void **alloc_pg_vec(struct netlink_sock *nlk,
			   struct nl_mmap_req *req, unsigned int order)
	unsigned int block_nr = req->nm_block_nr;

	pg_vec = kcalloc(block_nr, sizeof(void *), GFP_KERNEL);

	for (i = 0; i < block_nr; i++) {
		pg_vec[i] = alloc_one_pg_vec_page(order);
		if (pg_vec[i] == NULL)

	free_pg_vec(pg_vec, order, block_nr);

static int netlink_set_ring(struct sock *sk, struct nl_mmap_req *req,
			    bool closing, bool tx_ring)
	struct netlink_sock *nlk = nlk_sk(sk);
	struct netlink_ring *ring;
	struct sk_buff_head *queue;
	void **pg_vec = NULL;
	unsigned int order = 0;

	ring  = tx_ring ? &nlk->tx_ring : &nlk->rx_ring;
	queue = tx_ring ? &sk->sk_write_queue : &sk->sk_receive_queue;

	if (atomic_read(&nlk->mapped))
	if (atomic_read(&ring->pending))

	if (req->nm_block_nr) {
		if (ring->pg_vec != NULL)
		if ((int)req->nm_block_size <= 0)
		if (!PAGE_ALIGNED(req->nm_block_size))
		if (req->nm_frame_size < NL_MMAP_HDRLEN)
		if (!IS_ALIGNED(req->nm_frame_size, NL_MMAP_MSG_ALIGNMENT))

		ring->frames_per_block = req->nm_block_size /
		if (ring->frames_per_block == 0)
		if (ring->frames_per_block * req->nm_block_nr !=

		order = get_order(req->nm_block_size);
		pg_vec = alloc_pg_vec(nlk, req, order);

		if (req->nm_frame_nr)

	mutex_lock(&nlk->pg_vec_lock);
	if (closing || atomic_read(&nlk->mapped) == 0) {
		spin_lock_bh(&queue->lock);

		ring->frame_max = req->nm_frame_nr - 1;
		ring->frame_size = req->nm_frame_size;
		ring->pg_vec_pages = req->nm_block_size / PAGE_SIZE;

		swap(ring->pg_vec_len, req->nm_block_nr);
		swap(ring->pg_vec_order, order);
		swap(ring->pg_vec, pg_vec);

		__skb_queue_purge(queue);
		spin_unlock_bh(&queue->lock);

		WARN_ON(atomic_read(&nlk->mapped));
	mutex_unlock(&nlk->pg_vec_lock);

		free_pg_vec(pg_vec, order, req->nm_block_nr);

static void netlink_mm_open(struct vm_area_struct *vma)
	struct file *file = vma->vm_file;
	struct socket *sock = file->private_data;
	struct sock *sk = sock->sk;

		atomic_inc(&nlk_sk(sk)->mapped);

static void netlink_mm_close(struct vm_area_struct *vma)
	struct file *file = vma->vm_file;
	struct socket *sock = file->private_data;
	struct sock *sk = sock->sk;

		atomic_dec(&nlk_sk(sk)->mapped);

static const struct vm_operations_struct netlink_mmap_ops = {
	.open = netlink_mm_open,
	.close = netlink_mm_close,

static int netlink_mmap(struct file *file, struct socket *sock,
			struct vm_area_struct *vma)
	struct sock *sk = sock->sk;
	struct netlink_sock *nlk = nlk_sk(sk);
	struct netlink_ring *ring;
	unsigned long start, size, expected;

	mutex_lock(&nlk->pg_vec_lock);

	for (ring = &nlk->rx_ring; ring <= &nlk->tx_ring; ring++) {
		if (ring->pg_vec == NULL)
		expected += ring->pg_vec_len * ring->pg_vec_pages * PAGE_SIZE;

	size = vma->vm_end - vma->vm_start;
	if (size != expected)

	start = vma->vm_start;
	for (ring = &nlk->rx_ring; ring <= &nlk->tx_ring; ring++) {
		if (ring->pg_vec == NULL)

		for (i = 0; i < ring->pg_vec_len; i++) {
			void *kaddr = ring->pg_vec[i];

			for (pg_num = 0; pg_num < ring->pg_vec_pages; pg_num++) {
				page = pgvec_to_page(kaddr);
				err = vm_insert_page(vma, start, page);

	atomic_inc(&nlk->mapped);
	vma->vm_ops = &netlink_mmap_ops;

	mutex_unlock(&nlk->pg_vec_lock);

static void netlink_frame_flush_dcache(const struct nl_mmap_hdr *hdr, unsigned int nm_len)
#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE == 1
	struct page *p_start, *p_end;

	/* First page is flushed through netlink_{get,set}_status */
	p_start = pgvec_to_page((void *)hdr + PAGE_SIZE);
	p_end   = pgvec_to_page((void *)hdr + NL_MMAP_HDRLEN + nm_len - 1);
	while (p_start <= p_end) {
		flush_dcache_page(p_start);

static enum nl_mmap_status netlink_get_status(const struct nl_mmap_hdr *hdr)
	flush_dcache_page(pgvec_to_page(hdr));
	return hdr->nm_status;

static void netlink_set_status(struct nl_mmap_hdr *hdr,
			       enum nl_mmap_status status)
	hdr->nm_status = status;
	flush_dcache_page(pgvec_to_page(hdr));

static struct nl_mmap_hdr *
__netlink_lookup_frame(const struct netlink_ring *ring, unsigned int pos)
	unsigned int pg_vec_pos, frame_off;

	pg_vec_pos = pos / ring->frames_per_block;
	frame_off  = pos % ring->frames_per_block;

	return ring->pg_vec[pg_vec_pos] + (frame_off * ring->frame_size);

static struct nl_mmap_hdr *
netlink_lookup_frame(const struct netlink_ring *ring, unsigned int pos,
		     enum nl_mmap_status status)
	struct nl_mmap_hdr *hdr;

	hdr = __netlink_lookup_frame(ring, pos);
	if (netlink_get_status(hdr) != status)

static struct nl_mmap_hdr *
netlink_current_frame(const struct netlink_ring *ring,
		      enum nl_mmap_status status)
	return netlink_lookup_frame(ring, ring->head, status);

static struct nl_mmap_hdr *
netlink_previous_frame(const struct netlink_ring *ring,
		       enum nl_mmap_status status)
	prev = ring->head ? ring->head - 1 : ring->frame_max;
	return netlink_lookup_frame(ring, prev, status);

static void netlink_increment_head(struct netlink_ring *ring)
	ring->head = ring->head != ring->frame_max ? ring->head + 1 : 0;

static void netlink_forward_ring(struct netlink_ring *ring)
	unsigned int head = ring->head, pos = head;
	const struct nl_mmap_hdr *hdr;

		hdr = __netlink_lookup_frame(ring, pos);
		if (hdr->nm_status == NL_MMAP_STATUS_UNUSED)
		if (hdr->nm_status != NL_MMAP_STATUS_SKIP)
		netlink_increment_head(ring);
	} while (ring->head != head);

static bool netlink_dump_space(struct netlink_sock *nlk)
	struct netlink_ring *ring = &nlk->rx_ring;
	struct nl_mmap_hdr *hdr;

	hdr = netlink_current_frame(ring, NL_MMAP_STATUS_UNUSED);

	n = ring->head + ring->frame_max / 2;
	if (n > ring->frame_max)
		n -= ring->frame_max;

	hdr = __netlink_lookup_frame(ring, n);

	return hdr->nm_status == NL_MMAP_STATUS_UNUSED;

static unsigned int netlink_poll(struct file *file, struct socket *sock,
	struct sock *sk = sock->sk;
	struct netlink_sock *nlk = nlk_sk(sk);

	if (nlk->rx_ring.pg_vec != NULL) {
		/* Memory mapped sockets don't call recvmsg(), so flow control
		 * for dumps is performed here. A dump is allowed to continue
		 * if at least half the ring is unused.
		 */
		while (nlk->cb_running && netlink_dump_space(nlk)) {
			err = netlink_dump(sk);
				sk->sk_error_report(sk);
		netlink_rcv_wake(sk);

	mask = datagram_poll(file, sock, wait);

	spin_lock_bh(&sk->sk_receive_queue.lock);
	if (nlk->rx_ring.pg_vec) {
		netlink_forward_ring(&nlk->rx_ring);
		if (!netlink_previous_frame(&nlk->rx_ring, NL_MMAP_STATUS_UNUSED))
			mask |= POLLIN | POLLRDNORM;
	spin_unlock_bh(&sk->sk_receive_queue.lock);

	spin_lock_bh(&sk->sk_write_queue.lock);
	if (nlk->tx_ring.pg_vec) {
		if (netlink_current_frame(&nlk->tx_ring, NL_MMAP_STATUS_UNUSED))
			mask |= POLLOUT | POLLWRNORM;
	spin_unlock_bh(&sk->sk_write_queue.lock);

static struct nl_mmap_hdr *netlink_mmap_hdr(struct sk_buff *skb)
	return (struct nl_mmap_hdr *)(skb->head - NL_MMAP_HDRLEN);
static void netlink_ring_setup_skb(struct sk_buff *skb, struct sock *sk,
				   struct netlink_ring *ring,
				   struct nl_mmap_hdr *hdr)
	size = ring->frame_size - NL_MMAP_HDRLEN;
	data = (void *)hdr + NL_MMAP_HDRLEN;

	skb_reset_tail_pointer(skb);
	skb->end = skb->tail + size;

	skb->destructor = netlink_skb_destructor;
	NETLINK_CB(skb).flags |= NETLINK_SKB_MMAPED;
	NETLINK_CB(skb).sk = sk;

static int netlink_mmap_sendmsg(struct sock *sk, struct msghdr *msg,
				u32 dst_portid, u32 dst_group,
				struct sock_iocb *siocb)
	struct netlink_sock *nlk = nlk_sk(sk);
	struct netlink_ring *ring;
	struct nl_mmap_hdr *hdr;
	int err = 0, len = 0;

	mutex_lock(&nlk->pg_vec_lock);

	ring = &nlk->tx_ring;
	maxlen = ring->frame_size - NL_MMAP_HDRLEN;

		hdr = netlink_current_frame(ring, NL_MMAP_STATUS_VALID);
			if (!(msg->msg_flags & MSG_DONTWAIT) &&
			    atomic_read(&nlk->tx_ring.pending))

		nm_len = ACCESS_ONCE(hdr->nm_len);
		if (nm_len > maxlen) {

		netlink_frame_flush_dcache(hdr, nm_len);

		skb = alloc_skb(nm_len, GFP_KERNEL);

		__skb_put(skb, nm_len);
		memcpy(skb->data, (void *)hdr + NL_MMAP_HDRLEN, nm_len);
		netlink_set_status(hdr, NL_MMAP_STATUS_UNUSED);

		netlink_increment_head(ring);

		NETLINK_CB(skb).portid = nlk->portid;
		NETLINK_CB(skb).dst_group = dst_group;
		NETLINK_CB(skb).creds = siocb->scm->creds;

		err = security_netlink_send(sk, skb);

		if (unlikely(dst_group)) {
			atomic_inc(&skb->users);
			netlink_broadcast(sk, skb, dst_portid, dst_group,
			err = netlink_unicast(sk, skb, dst_portid,
					      msg->msg_flags & MSG_DONTWAIT);

	} while (hdr != NULL ||
		 (!(msg->msg_flags & MSG_DONTWAIT) &&
		  atomic_read(&nlk->tx_ring.pending)));

	mutex_unlock(&nlk->pg_vec_lock);

static void netlink_queue_mmaped_skb(struct sock *sk, struct sk_buff *skb)
	struct nl_mmap_hdr *hdr;

	hdr = netlink_mmap_hdr(skb);
	hdr->nm_len = skb->len;
	hdr->nm_group = NETLINK_CB(skb).dst_group;
	hdr->nm_pid = NETLINK_CB(skb).creds.pid;
	hdr->nm_uid = from_kuid(sk_user_ns(sk), NETLINK_CB(skb).creds.uid);
	hdr->nm_gid = from_kgid(sk_user_ns(sk), NETLINK_CB(skb).creds.gid);
	netlink_frame_flush_dcache(hdr, hdr->nm_len);
	netlink_set_status(hdr, NL_MMAP_STATUS_VALID);

	NETLINK_CB(skb).flags |= NETLINK_SKB_DELIVERED;

static void netlink_ring_set_copied(struct sock *sk, struct sk_buff *skb)
	struct netlink_sock *nlk = nlk_sk(sk);
	struct netlink_ring *ring = &nlk->rx_ring;
	struct nl_mmap_hdr *hdr;

	spin_lock_bh(&sk->sk_receive_queue.lock);
	hdr = netlink_current_frame(ring, NL_MMAP_STATUS_UNUSED);
		spin_unlock_bh(&sk->sk_receive_queue.lock);

	netlink_increment_head(ring);
	__skb_queue_tail(&sk->sk_receive_queue, skb);
	spin_unlock_bh(&sk->sk_receive_queue.lock);

	hdr->nm_len = skb->len;
	hdr->nm_group = NETLINK_CB(skb).dst_group;
	hdr->nm_pid = NETLINK_CB(skb).creds.pid;
	hdr->nm_uid = from_kuid(sk_user_ns(sk), NETLINK_CB(skb).creds.uid);
	hdr->nm_gid = from_kgid(sk_user_ns(sk), NETLINK_CB(skb).creds.gid);
	netlink_set_status(hdr, NL_MMAP_STATUS_COPY);

#else /* CONFIG_NETLINK_MMAP */
#define netlink_skb_is_mmaped(skb)	false
#define netlink_rx_is_mmaped(sk)	false
#define netlink_tx_is_mmaped(sk)	false
#define netlink_mmap			sock_no_mmap
#define netlink_poll			datagram_poll
#define netlink_mmap_sendmsg(sk, msg, dst_portid, dst_group, siocb)	0
#endif /* CONFIG_NETLINK_MMAP */
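/* Userspace view of the ring interface above (illustrative sketch only;
 * the field values are an assumption of one plausible configuration,
 * see Documentation/networking/netlink_mmap.txt for the real contract):
 *
 *	struct nl_mmap_req req = {
 *		.nm_block_size	= 4096,		(multiple of PAGE_SIZE)
 *		.nm_block_nr	= 64,
 *		.nm_frame_size	= 2048,		(>= NL_MMAP_HDRLEN, aligned
 *						 to NL_MMAP_MSG_ALIGNMENT)
 *		.nm_frame_nr	= 64 * 2,	(block_nr * frames_per_block)
 *	};
 *	setsockopt(fd, SOL_NETLINK, NETLINK_RX_RING, &req, sizeof(req));
 *	setsockopt(fd, SOL_NETLINK, NETLINK_TX_RING, &req, sizeof(req));
 *	ring = mmap(NULL, 2 * req.nm_block_size * req.nm_block_nr,
 *		    PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
 *
 * netlink_mmap() checks that the mmap() length matches the sum of both
 * configured rings, which is why RX and TX are mapped in a single call.
 */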
static void netlink_skb_destructor(struct sk_buff *skb)
#ifdef CONFIG_NETLINK_MMAP
	struct nl_mmap_hdr *hdr;
	struct netlink_ring *ring;

	/* If a packet from the kernel to userspace was freed because of an
	 * error without being delivered to userspace, the kernel must reset
	 * the status. In the userspace-to-kernel direction, the status is
	 * always reset here after the packet has been processed and freed.
	 */
	if (netlink_skb_is_mmaped(skb)) {
		hdr = netlink_mmap_hdr(skb);
		sk = NETLINK_CB(skb).sk;

		if (NETLINK_CB(skb).flags & NETLINK_SKB_TX) {
			netlink_set_status(hdr, NL_MMAP_STATUS_UNUSED);
			ring = &nlk_sk(sk)->tx_ring;
			if (!(NETLINK_CB(skb).flags & NETLINK_SKB_DELIVERED)) {
				netlink_set_status(hdr, NL_MMAP_STATUS_VALID);
			ring = &nlk_sk(sk)->rx_ring;

		WARN_ON(atomic_read(&ring->pending) == 0);
		atomic_dec(&ring->pending);

	if (is_vmalloc_addr(skb->head)) {
		    !atomic_dec_return(&(skb_shinfo(skb)->dataref)))

static void netlink_skb_set_owner_r(struct sk_buff *skb, struct sock *sk)
	WARN_ON(skb->sk != NULL);
	skb->destructor = netlink_skb_destructor;
	atomic_add(skb->truesize, &sk->sk_rmem_alloc);
	sk_mem_charge(sk, skb->truesize);

static void netlink_sock_destruct(struct sock *sk)
	struct netlink_sock *nlk = nlk_sk(sk);

	if (nlk->cb_running) {
		nlk->cb.done(&nlk->cb);

		module_put(nlk->cb.module);
		kfree_skb(nlk->cb.skb);

	skb_queue_purge(&sk->sk_receive_queue);
#ifdef CONFIG_NETLINK_MMAP
		struct nl_mmap_req req;

		memset(&req, 0, sizeof(req));
		if (nlk->rx_ring.pg_vec)
			netlink_set_ring(sk, &req, true, false);
		memset(&req, 0, sizeof(req));
		if (nlk->tx_ring.pg_vec)
			netlink_set_ring(sk, &req, true, true);
#endif /* CONFIG_NETLINK_MMAP */

	if (!sock_flag(sk, SOCK_DEAD)) {
		printk(KERN_ERR "Freeing alive netlink socket %p\n", sk);

	WARN_ON(atomic_read(&sk->sk_rmem_alloc));
	WARN_ON(atomic_read(&sk->sk_wmem_alloc));
	WARN_ON(nlk_sk(sk)->groups);

/* This lock without WQ_FLAG_EXCLUSIVE is good on UP and it is _very_ bad on
 * SMP. Look: when several writers sleep and a reader wakes them up, all but
 * one immediately hit the write lock and grab all the CPUs. Exclusive sleep
 * solves this, _but_ remember, it adds useless work on UP machines.
 */
void netlink_table_grab(void)
	__acquires(nl_table_lock)
	write_lock_irq(&nl_table_lock);

	if (atomic_read(&nl_table_users)) {
		DECLARE_WAITQUEUE(wait, current);

		add_wait_queue_exclusive(&nl_table_wait, &wait);
			set_current_state(TASK_UNINTERRUPTIBLE);
			if (atomic_read(&nl_table_users) == 0)
			write_unlock_irq(&nl_table_lock);
			write_lock_irq(&nl_table_lock);

		__set_current_state(TASK_RUNNING);
		remove_wait_queue(&nl_table_wait, &wait);

void netlink_table_ungrab(void)
	__releases(nl_table_lock)
	write_unlock_irq(&nl_table_lock);
	wake_up(&nl_table_wait);

netlink_lock_table(void)
	/* read_lock() synchronizes us to netlink_table_grab */

	read_lock(&nl_table_lock);
	atomic_inc(&nl_table_users);
	read_unlock(&nl_table_lock);

netlink_unlock_table(void)
	if (atomic_dec_and_test(&nl_table_users))
		wake_up(&nl_table_wait);
struct netlink_compare_arg

static bool netlink_compare(void *ptr, void *arg)
	struct netlink_compare_arg *x = arg;
	struct sock *sk = ptr;

	return nlk_sk(sk)->portid == x->portid &&
	       net_eq(sock_net(sk), x->net);

static struct sock *__netlink_lookup(struct netlink_table *table, u32 portid,
	struct netlink_compare_arg arg = {

	hash = rhashtable_hashfn(&table->hash, &portid, sizeof(portid));

	return rhashtable_lookup_compare(&table->hash, hash,
					 &netlink_compare, &arg);

static struct sock *netlink_lookup(struct net *net, int protocol, u32 portid)
	struct netlink_table *table = &nl_table[protocol];

	read_lock(&nl_table_lock);

	sk = __netlink_lookup(table, portid, net);

	read_unlock(&nl_table_lock);

static const struct proto_ops netlink_ops;

netlink_update_listeners(struct sock *sk)
	struct netlink_table *tbl = &nl_table[sk->sk_protocol];
	struct listeners *listeners;

	listeners = nl_deref_protected(tbl->listeners);

	for (i = 0; i < NLGRPLONGS(tbl->groups); i++) {
		sk_for_each_bound(sk, &tbl->mc_list) {
			if (i < NLGRPLONGS(nlk_sk(sk)->ngroups))
				mask |= nlk_sk(sk)->groups[i];
		listeners->masks[i] = mask;
	/* this function is only called with the netlink table "grabbed", which
	 * makes sure updates are visible before bind or setsockopt return. */

static int netlink_insert(struct sock *sk, struct net *net, u32 portid)
	struct netlink_table *table = &nl_table[sk->sk_protocol];
	int err = -EADDRINUSE;

	mutex_lock(&nl_sk_hash_lock);
	if (__netlink_lookup(table, portid, net))

	if (nlk_sk(sk)->portid)

	if (BITS_PER_LONG > 32 && unlikely(table->hash.nelems >= UINT_MAX))

	nlk_sk(sk)->portid = portid;

	rhashtable_insert(&table->hash, &nlk_sk(sk)->node);

	mutex_unlock(&nl_sk_hash_lock);

static void netlink_remove(struct sock *sk)
	struct netlink_table *table;

	mutex_lock(&nl_sk_hash_lock);
	table = &nl_table[sk->sk_protocol];
	if (rhashtable_remove(&table->hash, &nlk_sk(sk)->node)) {
		WARN_ON(atomic_read(&sk->sk_refcnt) == 1);
	mutex_unlock(&nl_sk_hash_lock);

	netlink_table_grab();
	if (nlk_sk(sk)->subscriptions) {
		__sk_del_bind_node(sk);
		netlink_update_listeners(sk);
	netlink_table_ungrab();

static struct proto netlink_proto = {
	.owner	  = THIS_MODULE,
	.obj_size = sizeof(struct netlink_sock),

static int __netlink_create(struct net *net, struct socket *sock,
			    struct mutex *cb_mutex, int protocol)
	struct netlink_sock *nlk;

	sock->ops = &netlink_ops;

	sk = sk_alloc(net, PF_NETLINK, GFP_KERNEL, &netlink_proto);

	sock_init_data(sock, sk);

		nlk->cb_mutex = cb_mutex;
		nlk->cb_mutex = &nlk->cb_def_mutex;
		mutex_init(nlk->cb_mutex);
	init_waitqueue_head(&nlk->wait);
#ifdef CONFIG_NETLINK_MMAP
	mutex_init(&nlk->pg_vec_lock);

	sk->sk_destruct = netlink_sock_destruct;
	sk->sk_protocol = protocol;
static int netlink_create(struct net *net, struct socket *sock, int protocol,
	struct module *module = NULL;
	struct mutex *cb_mutex;
	struct netlink_sock *nlk;
	int (*bind)(int group);
	void (*unbind)(int group);

	sock->state = SS_UNCONNECTED;

	if (sock->type != SOCK_RAW && sock->type != SOCK_DGRAM)
		return -ESOCKTNOSUPPORT;

	if (protocol < 0 || protocol >= MAX_LINKS)
		return -EPROTONOSUPPORT;

	netlink_lock_table();
#ifdef CONFIG_MODULES
	if (!nl_table[protocol].registered) {
		netlink_unlock_table();
		request_module("net-pf-%d-proto-%d", PF_NETLINK, protocol);
		netlink_lock_table();

	if (nl_table[protocol].registered &&
	    try_module_get(nl_table[protocol].module))
		module = nl_table[protocol].module;
		err = -EPROTONOSUPPORT;
	cb_mutex = nl_table[protocol].cb_mutex;
	bind = nl_table[protocol].bind;
	unbind = nl_table[protocol].unbind;
	netlink_unlock_table();

	err = __netlink_create(net, sock, cb_mutex, protocol);

	sock_prot_inuse_add(net, &netlink_proto, 1);

	nlk = nlk_sk(sock->sk);
	nlk->module = module;
	nlk->netlink_bind = bind;
	nlk->netlink_unbind = unbind;

static int netlink_release(struct socket *sock)
	struct sock *sk = sock->sk;
	struct netlink_sock *nlk;

	/* OK. Socket is unlinked, any packets that arrive now will be purged. */

	wake_up_interruptible_all(&nlk->wait);

	skb_queue_purge(&sk->sk_write_queue);

		struct netlink_notify n = {
			.net = sock_net(sk),
			.protocol = sk->sk_protocol,
			.portid = nlk->portid,
		atomic_notifier_call_chain(&netlink_chain,
					   NETLINK_URELEASE, &n);

	module_put(nlk->module);

	if (netlink_is_kernel(sk)) {
		netlink_table_grab();
		BUG_ON(nl_table[sk->sk_protocol].registered == 0);
		if (--nl_table[sk->sk_protocol].registered == 0) {
			struct listeners *old;

			old = nl_deref_protected(nl_table[sk->sk_protocol].listeners);
			RCU_INIT_POINTER(nl_table[sk->sk_protocol].listeners, NULL);
			kfree_rcu(old, rcu);
			nl_table[sk->sk_protocol].module = NULL;
			nl_table[sk->sk_protocol].bind = NULL;
			nl_table[sk->sk_protocol].unbind = NULL;
			nl_table[sk->sk_protocol].flags = 0;
			nl_table[sk->sk_protocol].registered = 0;
		netlink_table_ungrab();

	if (nlk->netlink_unbind) {
		for (i = 0; i < nlk->ngroups; i++)
			if (test_bit(i, nlk->groups))
				nlk->netlink_unbind(i + 1);

	sock_prot_inuse_add(sock_net(sk), &netlink_proto, -1);

static int netlink_autobind(struct socket *sock)
	struct sock *sk = sock->sk;
	struct net *net = sock_net(sk);
	struct netlink_table *table = &nl_table[sk->sk_protocol];
	s32 portid = task_tgid_vnr(current);
	static s32 rover = -4097;

	netlink_table_grab();

	if (__netlink_lookup(table, portid, net)) {
		/* Bind collision, search negative portid values. */
		netlink_table_ungrab();

	netlink_table_ungrab();

	err = netlink_insert(sk, net, portid);
	if (err == -EADDRINUSE)

	/* If 2 threads race to autobind, that is fine. */
/**
 * __netlink_ns_capable - General netlink message capability test
 * @nsp: NETLINK_CB of the socket buffer holding a netlink command from userspace
 * @user_ns: The user namespace of the capability to use
 * @cap: The capability to use
 *
 * Test to see if the opener of the socket we received the message from
 * had the capability @cap when the netlink socket was created, and that
 * the sender of the message has the capability @cap in the user
 * namespace @user_ns.
 */
bool __netlink_ns_capable(const struct netlink_skb_parms *nsp,
			  struct user_namespace *user_ns, int cap)
	return ((nsp->flags & NETLINK_SKB_DST) ||
		file_ns_capable(nsp->sk->sk_socket->file, user_ns, cap)) &&
		ns_capable(user_ns, cap);
EXPORT_SYMBOL(__netlink_ns_capable);

/**
 * netlink_ns_capable - General netlink message capability test
 * @skb: socket buffer holding a netlink command from userspace
 * @user_ns: The user namespace of the capability to use
 * @cap: The capability to use
 *
 * Test to see if the opener of the socket we received the message from
 * had the capability @cap when the netlink socket was created, and that
 * the sender of the message has the capability @cap in the user
 * namespace @user_ns.
 */
bool netlink_ns_capable(const struct sk_buff *skb,
			struct user_namespace *user_ns, int cap)
	return __netlink_ns_capable(&NETLINK_CB(skb), user_ns, cap);
EXPORT_SYMBOL(netlink_ns_capable);

/**
 * netlink_capable - Netlink global message capability test
 * @skb: socket buffer holding a netlink command from userspace
 * @cap: The capability to use
 *
 * Test to see if the opener of the socket we received the message from
 * had the capability @cap when the netlink socket was created, and that
 * the sender of the message has the capability @cap in all user
 * namespaces.
 */
bool netlink_capable(const struct sk_buff *skb, int cap)
	return netlink_ns_capable(skb, &init_user_ns, cap);
EXPORT_SYMBOL(netlink_capable);

/**
 * netlink_net_capable - Netlink network namespace message capability test
 * @skb: socket buffer holding a netlink command from userspace
 * @cap: The capability to use
 *
 * Test to see if the opener of the socket we received the message from
 * had the capability @cap when the netlink socket was created, and that
 * the sender of the message has the capability @cap over the network
 * namespace of the socket we received the message from.
 */
bool netlink_net_capable(const struct sk_buff *skb, int cap)
	return netlink_ns_capable(skb, sock_net(skb->sk)->user_ns, cap);
EXPORT_SYMBOL(netlink_net_capable);
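/* Usage sketch (illustrative, not part of this file): message handlers
 * gate privileged commands on the sender's credentials with these
 * helpers; rtnetlink, for example, does the equivalent of:
 *
 *	if (!netlink_net_capable(skb, CAP_NET_ADMIN))
 *		return -EPERM;
 *
 * Using the skb-based helpers rather than plain capable() ties the
 * check to the socket's opener, as the comments above explain.
 */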
static inline int netlink_allowed(const struct socket *sock, unsigned int flag)
	return (nl_table[sock->sk->sk_protocol].flags & flag) ||
		ns_capable(sock_net(sock->sk)->user_ns, CAP_NET_ADMIN);

netlink_update_subscriptions(struct sock *sk, unsigned int subscriptions)
	struct netlink_sock *nlk = nlk_sk(sk);

	if (nlk->subscriptions && !subscriptions)
		__sk_del_bind_node(sk);
	else if (!nlk->subscriptions && subscriptions)
		sk_add_bind_node(sk, &nl_table[sk->sk_protocol].mc_list);
	nlk->subscriptions = subscriptions;

static int netlink_realloc_groups(struct sock *sk)
	struct netlink_sock *nlk = nlk_sk(sk);
	unsigned int groups;
	unsigned long *new_groups;

	netlink_table_grab();

	groups = nl_table[sk->sk_protocol].groups;
	if (!nl_table[sk->sk_protocol].registered) {

	if (nlk->ngroups >= groups)

	new_groups = krealloc(nlk->groups, NLGRPSZ(groups), GFP_ATOMIC);
	if (new_groups == NULL) {
	memset((char *)new_groups + NLGRPSZ(nlk->ngroups), 0,
	       NLGRPSZ(groups) - NLGRPSZ(nlk->ngroups));

	nlk->groups = new_groups;
	nlk->ngroups = groups;

	netlink_table_ungrab();

static void netlink_undo_bind(int group, unsigned long groups,
			      struct netlink_sock *nlk)
	if (!nlk->netlink_unbind)

	for (undo = 0; undo < group; undo++)
		if (test_bit(undo, &groups))
			nlk->netlink_unbind(undo);

static int netlink_bind(struct socket *sock, struct sockaddr *addr,
	struct sock *sk = sock->sk;
	struct net *net = sock_net(sk);
	struct netlink_sock *nlk = nlk_sk(sk);
	struct sockaddr_nl *nladdr = (struct sockaddr_nl *)addr;
	unsigned long groups = nladdr->nl_groups;

	if (addr_len < sizeof(struct sockaddr_nl))

	if (nladdr->nl_family != AF_NETLINK)
	/* Only the superuser is allowed to listen to multicasts */
		if (!netlink_allowed(sock, NL_CFG_F_NONROOT_RECV))
		err = netlink_realloc_groups(sk);

		if (nladdr->nl_pid != nlk->portid)

	if (nlk->netlink_bind && groups) {
		for (group = 0; group < nlk->ngroups; group++) {
			if (!test_bit(group, &groups))
			err = nlk->netlink_bind(group);
			netlink_undo_bind(group, groups, nlk);

		err = nladdr->nl_pid ?
			netlink_insert(sk, net, nladdr->nl_pid) :
			netlink_autobind(sock);
			netlink_undo_bind(nlk->ngroups, groups, nlk);

	if (!groups && (nlk->groups == NULL || !(u32)nlk->groups[0]))

	netlink_table_grab();
	netlink_update_subscriptions(sk, nlk->subscriptions +
					 hweight32(nlk->groups[0]));
	nlk->groups[0] = (nlk->groups[0] & ~0xffffffffUL) | groups;
	netlink_update_listeners(sk);
	netlink_table_ungrab();

static int netlink_connect(struct socket *sock, struct sockaddr *addr,
			   int alen, int flags)
	struct sock *sk = sock->sk;
	struct netlink_sock *nlk = nlk_sk(sk);
	struct sockaddr_nl *nladdr = (struct sockaddr_nl *)addr;

	if (alen < sizeof(addr->sa_family))

	if (addr->sa_family == AF_UNSPEC) {
		sk->sk_state = NETLINK_UNCONNECTED;
		nlk->dst_portid = 0;

	if (addr->sa_family != AF_NETLINK)

	if ((nladdr->nl_groups || nladdr->nl_pid) &&
	    !netlink_allowed(sock, NL_CFG_F_NONROOT_SEND))

		err = netlink_autobind(sock);

		sk->sk_state = NETLINK_CONNECTED;
		nlk->dst_portid = nladdr->nl_pid;
		nlk->dst_group = ffs(nladdr->nl_groups);

static int netlink_getname(struct socket *sock, struct sockaddr *addr,
			   int *addr_len, int peer)
	struct sock *sk = sock->sk;
	struct netlink_sock *nlk = nlk_sk(sk);
	DECLARE_SOCKADDR(struct sockaddr_nl *, nladdr, addr);

	nladdr->nl_family = AF_NETLINK;

	*addr_len = sizeof(*nladdr);

		nladdr->nl_pid = nlk->dst_portid;
		nladdr->nl_groups = netlink_group_mask(nlk->dst_group);
		nladdr->nl_pid = nlk->portid;
		nladdr->nl_groups = nlk->groups ? nlk->groups[0] : 0;

static struct sock *netlink_getsockbyportid(struct sock *ssk, u32 portid)
	struct netlink_sock *nlk;

	sock = netlink_lookup(sock_net(ssk), ssk->sk_protocol, portid);
		return ERR_PTR(-ECONNREFUSED);

	/* Don't bother queuing skb if kernel socket has no input function */
	if (sock->sk_state == NETLINK_CONNECTED &&
	    nlk->dst_portid != nlk_sk(ssk)->portid) {
		return ERR_PTR(-ECONNREFUSED);

struct sock *netlink_getsockbyfilp(struct file *filp)
	struct inode *inode = file_inode(filp);

	if (!S_ISSOCK(inode->i_mode))
		return ERR_PTR(-ENOTSOCK);

	sock = SOCKET_I(inode)->sk;
	if (sock->sk_family != AF_NETLINK)
		return ERR_PTR(-EINVAL);

static struct sk_buff *netlink_alloc_large_skb(unsigned int size,
	struct sk_buff *skb;

	if (size <= NLMSG_GOODSIZE || broadcast)
		return alloc_skb(size, GFP_KERNEL);

	size = SKB_DATA_ALIGN(size) +
	       SKB_DATA_ALIGN(sizeof(struct skb_shared_info));

	data = vmalloc(size);

	skb = build_skb(data, size);
		skb->destructor = netlink_skb_destructor;
/*
 * Attach an skb to a netlink socket.
 * The caller must hold a reference to the destination socket. On error,
 * the reference is dropped. The skb is not sent to the destination; all
 * error checks are performed and memory in the queue is reserved.
 * < 0: error. skb freed, reference to sock dropped.
 * 1: repeat lookup - reference dropped while waiting for socket memory.
 */
int netlink_attachskb(struct sock *sk, struct sk_buff *skb,
		      long *timeo, struct sock *ssk)
	struct netlink_sock *nlk;

	if ((atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf ||
	     test_bit(NETLINK_CONGESTED, &nlk->state)) &&
	    !netlink_skb_is_mmaped(skb)) {
		DECLARE_WAITQUEUE(wait, current);

		if (!ssk || netlink_is_kernel(ssk))
			netlink_overrun(sk);

		__set_current_state(TASK_INTERRUPTIBLE);
		add_wait_queue(&nlk->wait, &wait);

		if ((atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf ||
		     test_bit(NETLINK_CONGESTED, &nlk->state)) &&
		    !sock_flag(sk, SOCK_DEAD))
			*timeo = schedule_timeout(*timeo);

		__set_current_state(TASK_RUNNING);
		remove_wait_queue(&nlk->wait, &wait);

		if (signal_pending(current)) {
			return sock_intr_errno(*timeo);

	netlink_skb_set_owner_r(skb, sk);

static int __netlink_sendskb(struct sock *sk, struct sk_buff *skb)
	netlink_deliver_tap(skb);

#ifdef CONFIG_NETLINK_MMAP
	if (netlink_skb_is_mmaped(skb))
		netlink_queue_mmaped_skb(sk, skb);
	else if (netlink_rx_is_mmaped(sk))
		netlink_ring_set_copied(sk, skb);
#endif /* CONFIG_NETLINK_MMAP */
		skb_queue_tail(&sk->sk_receive_queue, skb);
	sk->sk_data_ready(sk);

int netlink_sendskb(struct sock *sk, struct sk_buff *skb)
	int len = __netlink_sendskb(sk, skb);

void netlink_detachskb(struct sock *sk, struct sk_buff *skb)

static struct sk_buff *netlink_trim(struct sk_buff *skb, gfp_t allocation)
	WARN_ON(skb->sk != NULL);
	if (netlink_skb_is_mmaped(skb))

	delta = skb->end - skb->tail;
	if (is_vmalloc_addr(skb->head) || delta * 2 < skb->truesize)

	if (skb_shared(skb)) {
		struct sk_buff *nskb = skb_clone(skb, allocation);

	if (!pskb_expand_head(skb, 0, -delta, allocation))
		skb->truesize -= delta;

static int netlink_unicast_kernel(struct sock *sk, struct sk_buff *skb,
	struct netlink_sock *nlk = nlk_sk(sk);

	ret = -ECONNREFUSED;
	if (nlk->netlink_rcv != NULL) {
		netlink_skb_set_owner_r(skb, sk);
		NETLINK_CB(skb).sk = ssk;
		netlink_deliver_tap_kernel(sk, ssk, skb);
		nlk->netlink_rcv(skb);

int netlink_unicast(struct sock *ssk, struct sk_buff *skb,
		    u32 portid, int nonblock)
	skb = netlink_trim(skb, gfp_any());

	timeo = sock_sndtimeo(ssk, nonblock);

	sk = netlink_getsockbyportid(ssk, portid);

	if (netlink_is_kernel(sk))
		return netlink_unicast_kernel(sk, skb, ssk);

	if (sk_filter(sk, skb)) {

	err = netlink_attachskb(sk, skb, &timeo, ssk);

	return netlink_sendskb(sk, skb);
EXPORT_SYMBOL(netlink_unicast);
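/* Usage sketch (illustrative, not part of this file): a kernel-side
 * protocol typically answers a request by building a reply and
 * unicasting it back to the sender's portid taken from the request skb:
 *
 *	struct sk_buff *reply = nlmsg_new(payload_size, GFP_KERNEL);
 *	struct nlmsghdr *nlh;
 *
 *	if (!reply)
 *		return -ENOMEM;
 *	nlh = nlmsg_put(reply, 0, req_nlh->nlmsg_seq, MY_MSG_TYPE,
 *			payload_size, 0);
 *	...fill nlmsg_data(nlh)...
 *	netlink_unicast(kernel_sk, reply, NETLINK_CB(req_skb).portid,
 *			MSG_DONTWAIT);
 *
 * MY_MSG_TYPE, payload_size, kernel_sk and req_skb are placeholders.
 */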
struct sk_buff *netlink_alloc_skb(struct sock *ssk, unsigned int size,
				  u32 dst_portid, gfp_t gfp_mask)
#ifdef CONFIG_NETLINK_MMAP
	struct sock *sk = NULL;
	struct sk_buff *skb;
	struct netlink_ring *ring;
	struct nl_mmap_hdr *hdr;
	unsigned int maxlen;

	sk = netlink_getsockbyportid(ssk, dst_portid);

	ring = &nlk_sk(sk)->rx_ring;
	/* fast-path without atomic ops for common case: non-mmaped receiver */
	if (ring->pg_vec == NULL)

	if (ring->frame_size - NL_MMAP_HDRLEN < size)

	skb = alloc_skb_head(gfp_mask);

	spin_lock_bh(&sk->sk_receive_queue.lock);
	/* check again under lock */
	if (ring->pg_vec == NULL)

	/* check again under lock */
	maxlen = ring->frame_size - NL_MMAP_HDRLEN;

	netlink_forward_ring(ring);
	hdr = netlink_current_frame(ring, NL_MMAP_STATUS_UNUSED);

	netlink_ring_setup_skb(skb, sk, ring, hdr);
	netlink_set_status(hdr, NL_MMAP_STATUS_RESERVED);
	atomic_inc(&ring->pending);
	netlink_increment_head(ring);

	spin_unlock_bh(&sk->sk_receive_queue.lock);

	spin_unlock_bh(&sk->sk_receive_queue.lock);
	netlink_overrun(sk);

	spin_unlock_bh(&sk->sk_receive_queue.lock);

	return alloc_skb(size, gfp_mask);
EXPORT_SYMBOL_GPL(netlink_alloc_skb);

int netlink_has_listeners(struct sock *sk, unsigned int group)
	struct listeners *listeners;

	BUG_ON(!netlink_is_kernel(sk));

	listeners = rcu_dereference(nl_table[sk->sk_protocol].listeners);

	if (listeners && group - 1 < nl_table[sk->sk_protocol].groups)
		res = test_bit(group - 1, listeners->masks);
EXPORT_SYMBOL_GPL(netlink_has_listeners);
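/* Usage sketch (illustrative): event producers can skip building a
 * notification when nobody has subscribed to the group:
 *
 *	if (!netlink_has_listeners(kernel_sk, MY_MCAST_GROUP))
 *		return;
 *
 * kernel_sk and MY_MCAST_GROUP are placeholders; generic netlink wraps
 * the same check for its dynamically assigned groups.
 */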
static int netlink_broadcast_deliver(struct sock *sk, struct sk_buff *skb)
	struct netlink_sock *nlk = nlk_sk(sk);

	if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf &&
	    !test_bit(NETLINK_CONGESTED, &nlk->state)) {
		netlink_skb_set_owner_r(skb, sk);
		__netlink_sendskb(sk, skb);
		return atomic_read(&sk->sk_rmem_alloc) > (sk->sk_rcvbuf >> 1);

struct netlink_broadcast_data {
	struct sock *exclude_sk;
	int delivery_failure;
	struct sk_buff *skb, *skb2;
	int (*tx_filter)(struct sock *dsk, struct sk_buff *skb, void *data);

static void do_one_broadcast(struct sock *sk,
			     struct netlink_broadcast_data *p)
	struct netlink_sock *nlk = nlk_sk(sk);

	if (p->exclude_sk == sk)

	if (nlk->portid == p->portid || p->group - 1 >= nlk->ngroups ||
	    !test_bit(p->group - 1, nlk->groups))

	if (!net_eq(sock_net(sk), p->net))

		netlink_overrun(sk);

	if (p->skb2 == NULL) {
		if (skb_shared(p->skb)) {
			p->skb2 = skb_clone(p->skb, p->allocation);
			p->skb2 = skb_get(p->skb);
			/*
			 * skb ownership may have been set when
			 * delivered to a previous socket.
			 */
			skb_orphan(p->skb2);

	if (p->skb2 == NULL) {
		netlink_overrun(sk);
		/* Clone failed. Notify ALL listeners. */
		if (nlk->flags & NETLINK_BROADCAST_SEND_ERROR)
			p->delivery_failure = 1;
	} else if (p->tx_filter && p->tx_filter(sk, p->skb2, p->tx_data)) {
	} else if (sk_filter(sk, p->skb2)) {
	} else if ((val = netlink_broadcast_deliver(sk, p->skb2)) < 0) {
		netlink_overrun(sk);
		if (nlk->flags & NETLINK_BROADCAST_SEND_ERROR)
			p->delivery_failure = 1;
		p->congested |= val;

int netlink_broadcast_filtered(struct sock *ssk, struct sk_buff *skb, u32 portid,
			       u32 group, gfp_t allocation,
			       int (*filter)(struct sock *dsk, struct sk_buff *skb, void *data),
	struct net *net = sock_net(ssk);
	struct netlink_broadcast_data info;

	skb = netlink_trim(skb, allocation);

	info.exclude_sk = ssk;
	info.portid = portid;
	info.delivery_failure = 0;
	info.allocation = allocation;
	info.tx_filter = filter;
	info.tx_data = filter_data;
	/* While we sleep in clone, do not allow the socket list to change */
	netlink_lock_table();

	sk_for_each_bound(sk, &nl_table[ssk->sk_protocol].mc_list)
		do_one_broadcast(sk, &info);

	netlink_unlock_table();

	if (info.delivery_failure) {
		kfree_skb(info.skb2);
	consume_skb(info.skb2);

	if (info.delivered) {
		if (info.congested && (allocation & __GFP_WAIT))
EXPORT_SYMBOL(netlink_broadcast_filtered);

int netlink_broadcast(struct sock *ssk, struct sk_buff *skb, u32 portid,
		      u32 group, gfp_t allocation)
	return netlink_broadcast_filtered(ssk, skb, portid, group, allocation,
EXPORT_SYMBOL(netlink_broadcast);
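/* Usage sketch (illustrative): sending one notification to a multicast
 * group from a kernel socket, excluding no portid:
 *
 *	skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
 *	...build the message with nlmsg_put()/nla_put()...
 *	netlink_broadcast(kernel_sk, skb, 0, MY_MCAST_GROUP, GFP_KERNEL);
 *
 * netlink_broadcast() consumes the skb reference even on error, so the
 * caller must not kfree_skb() it afterwards. kernel_sk and
 * MY_MCAST_GROUP are placeholders.
 */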
struct netlink_set_err_data {
	struct sock *exclude_sk;

static int do_one_set_err(struct sock *sk, struct netlink_set_err_data *p)
	struct netlink_sock *nlk = nlk_sk(sk);

	if (sk == p->exclude_sk)

	if (!net_eq(sock_net(sk), sock_net(p->exclude_sk)))

	if (nlk->portid == p->portid || p->group - 1 >= nlk->ngroups ||
	    !test_bit(p->group - 1, nlk->groups))

	if (p->code == ENOBUFS && nlk->flags & NETLINK_RECV_NO_ENOBUFS) {

	sk->sk_err = p->code;
	sk->sk_error_report(sk);

/**
 * netlink_set_err - report error to broadcast listeners
 * @ssk: the kernel netlink socket, as returned by netlink_kernel_create()
 * @portid: the PORTID of a process that we want to skip (if any)
 * @group: the broadcast group that will notice the error
 * @code: error code, must be negative (as usual in kernelspace)
 *
 * This function returns the number of broadcast listeners that have set the
 * NETLINK_RECV_NO_ENOBUFS socket option.
 */
int netlink_set_err(struct sock *ssk, u32 portid, u32 group, int code)
	struct netlink_set_err_data info;

	info.exclude_sk = ssk;
	info.portid = portid;
	/* sk->sk_err wants a positive error value */

	read_lock(&nl_table_lock);

	sk_for_each_bound(sk, &nl_table[ssk->sk_protocol].mc_list)
		ret += do_one_set_err(sk, &info);

	read_unlock(&nl_table_lock);
EXPORT_SYMBOL(netlink_set_err);

/* must be called with netlink table grabbed */
static void netlink_update_socket_mc(struct netlink_sock *nlk,
	int old, new = !!is_new, subscriptions;

	old = test_bit(group - 1, nlk->groups);
	subscriptions = nlk->subscriptions - old + new;
		__set_bit(group - 1, nlk->groups);
		__clear_bit(group - 1, nlk->groups);
	netlink_update_subscriptions(&nlk->sk, subscriptions);
	netlink_update_listeners(&nlk->sk);

static int netlink_setsockopt(struct socket *sock, int level, int optname,
			      char __user *optval, unsigned int optlen)
	struct sock *sk = sock->sk;
	struct netlink_sock *nlk = nlk_sk(sk);
	unsigned int val = 0;

	if (level != SOL_NETLINK)
		return -ENOPROTOOPT;

	if (optname != NETLINK_RX_RING && optname != NETLINK_TX_RING &&
	    optlen >= sizeof(int) &&
	    get_user(val, (unsigned int __user *)optval))

	case NETLINK_PKTINFO:
			nlk->flags |= NETLINK_RECV_PKTINFO;
			nlk->flags &= ~NETLINK_RECV_PKTINFO;
	case NETLINK_ADD_MEMBERSHIP:
	case NETLINK_DROP_MEMBERSHIP: {
		if (!netlink_allowed(sock, NL_CFG_F_NONROOT_RECV))
		err = netlink_realloc_groups(sk);
		if (!val || val - 1 >= nlk->ngroups)
		if (optname == NETLINK_ADD_MEMBERSHIP && nlk->netlink_bind) {
			err = nlk->netlink_bind(val);
		netlink_table_grab();
		netlink_update_socket_mc(nlk, val,
					 optname == NETLINK_ADD_MEMBERSHIP);
		netlink_table_ungrab();
		if (optname == NETLINK_DROP_MEMBERSHIP && nlk->netlink_unbind)
			nlk->netlink_unbind(val);
	case NETLINK_BROADCAST_ERROR:
			nlk->flags |= NETLINK_BROADCAST_SEND_ERROR;
			nlk->flags &= ~NETLINK_BROADCAST_SEND_ERROR;
	case NETLINK_NO_ENOBUFS:
			nlk->flags |= NETLINK_RECV_NO_ENOBUFS;
			clear_bit(NETLINK_CONGESTED, &nlk->state);
			wake_up_interruptible(&nlk->wait);
			nlk->flags &= ~NETLINK_RECV_NO_ENOBUFS;
#ifdef CONFIG_NETLINK_MMAP
	case NETLINK_RX_RING:
	case NETLINK_TX_RING: {
		struct nl_mmap_req req;

		/* Rings might consume more memory than queue limits, require
		 * CAP_NET_ADMIN.
		 */
		if (!capable(CAP_NET_ADMIN))
		if (optlen < sizeof(req))
		if (copy_from_user(&req, optval, sizeof(req)))
		err = netlink_set_ring(sk, &req, false,
				       optname == NETLINK_TX_RING);
#endif /* CONFIG_NETLINK_MMAP */
static int netlink_getsockopt(struct socket *sock, int level, int optname,
			      char __user *optval, int __user *optlen)
	struct sock *sk = sock->sk;
	struct netlink_sock *nlk = nlk_sk(sk);

	if (level != SOL_NETLINK)
		return -ENOPROTOOPT;

	if (get_user(len, optlen))

	case NETLINK_PKTINFO:
		if (len < sizeof(int))
		val = nlk->flags & NETLINK_RECV_PKTINFO ? 1 : 0;
		if (put_user(len, optlen) ||
		    put_user(val, optval))
	case NETLINK_BROADCAST_ERROR:
		if (len < sizeof(int))
		val = nlk->flags & NETLINK_BROADCAST_SEND_ERROR ? 1 : 0;
		if (put_user(len, optlen) ||
		    put_user(val, optval))
	case NETLINK_NO_ENOBUFS:
		if (len < sizeof(int))
		val = nlk->flags & NETLINK_RECV_NO_ENOBUFS ? 1 : 0;
		if (put_user(len, optlen) ||
		    put_user(val, optval))

static void netlink_cmsg_recv_pktinfo(struct msghdr *msg, struct sk_buff *skb)
	struct nl_pktinfo info;

	info.group = NETLINK_CB(skb).dst_group;
	put_cmsg(msg, SOL_NETLINK, NETLINK_PKTINFO, sizeof(info), &info);

static int netlink_sendmsg(struct kiocb *kiocb, struct socket *sock,
			   struct msghdr *msg, size_t len)
	struct sock_iocb *siocb = kiocb_to_siocb(kiocb);
	struct sock *sk = sock->sk;
	struct netlink_sock *nlk = nlk_sk(sk);
	DECLARE_SOCKADDR(struct sockaddr_nl *, addr, msg->msg_name);
	struct sk_buff *skb;
	struct scm_cookie scm;
	u32 netlink_skb_flags = 0;

	if (msg->msg_flags & MSG_OOB)

	if (NULL == siocb->scm)

	err = scm_send(sock, msg, siocb->scm, true);

	if (msg->msg_namelen) {
		if (addr->nl_family != AF_NETLINK)
		dst_portid = addr->nl_pid;
		dst_group = ffs(addr->nl_groups);
		if ((dst_group || dst_portid) &&
		    !netlink_allowed(sock, NL_CFG_F_NONROOT_SEND))
		netlink_skb_flags |= NETLINK_SKB_DST;
		dst_portid = nlk->dst_portid;
		dst_group = nlk->dst_group;

		err = netlink_autobind(sock);

	if (netlink_tx_is_mmaped(sk) &&
	    msg->msg_iter.iov->iov_base == NULL) {
		err = netlink_mmap_sendmsg(sk, msg, dst_portid, dst_group,

	if (len > sk->sk_sndbuf - 32)

	skb = netlink_alloc_large_skb(len, dst_group);

	NETLINK_CB(skb).portid = nlk->portid;
	NETLINK_CB(skb).dst_group = dst_group;
	NETLINK_CB(skb).creds = siocb->scm->creds;
	NETLINK_CB(skb).flags = netlink_skb_flags;

	if (memcpy_from_msg(skb_put(skb, len), msg, len)) {

	err = security_netlink_send(sk, skb);

		atomic_inc(&skb->users);
		netlink_broadcast(sk, skb, dst_portid, dst_group, GFP_KERNEL);
	err = netlink_unicast(sk, skb, dst_portid, msg->msg_flags & MSG_DONTWAIT);

	scm_destroy(siocb->scm);

static int netlink_recvmsg(struct kiocb *kiocb, struct socket *sock,
			   struct msghdr *msg, size_t len,
	struct sock_iocb *siocb = kiocb_to_siocb(kiocb);
	struct scm_cookie scm;
	struct sock *sk = sock->sk;
	struct netlink_sock *nlk = nlk_sk(sk);
	int noblock = flags & MSG_DONTWAIT;
	struct sk_buff *skb, *data_skb;

	skb = skb_recv_datagram(sk, flags, noblock, &err);

#ifdef CONFIG_COMPAT_NETLINK_MESSAGES
	if (unlikely(skb_shinfo(skb)->frag_list)) {
		/*
		 * If this skb has a frag_list, that means we will have to
		 * use the frag_list skb's data for compat tasks and the
		 * regular skb's data for normal (non-compat) tasks.
		 *
		 * If we need to send the compat skb, assign it to the
		 * 'data_skb' variable so that it will be used below for data
		 * copying. We keep 'skb' for everything else, including
		 * freeing both later.
		 */
		if (flags & MSG_CMSG_COMPAT)
			data_skb = skb_shinfo(skb)->frag_list;

	/* Record the max length of recvmsg() calls for future allocations */
	nlk->max_recvmsg_len = max(nlk->max_recvmsg_len, len);
	nlk->max_recvmsg_len = min_t(size_t, nlk->max_recvmsg_len,

	copied = data_skb->len;
		msg->msg_flags |= MSG_TRUNC;

	skb_reset_transport_header(data_skb);
	err = skb_copy_datagram_msg(data_skb, 0, msg, copied);

	if (msg->msg_name) {
		DECLARE_SOCKADDR(struct sockaddr_nl *, addr, msg->msg_name);
		addr->nl_family = AF_NETLINK;
		addr->nl_pid = NETLINK_CB(skb).portid;
		addr->nl_groups = netlink_group_mask(NETLINK_CB(skb).dst_group);
		msg->msg_namelen = sizeof(*addr);

	if (nlk->flags & NETLINK_RECV_PKTINFO)
		netlink_cmsg_recv_pktinfo(msg, skb);

	if (NULL == siocb->scm) {
		memset(&scm, 0, sizeof(scm));
	siocb->scm->creds = *NETLINK_CREDS(skb);
	if (flags & MSG_TRUNC)
		copied = data_skb->len;

	skb_free_datagram(sk, skb);

	if (nlk->cb_running &&
	    atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf / 2) {
		ret = netlink_dump(sk);
			sk->sk_error_report(sk);

	scm_recv(sock, msg, siocb->scm, flags);

	netlink_rcv_wake(sk);
	return err ? : copied;

static void netlink_data_ready(struct sock *sk)

/*
 * We export these functions to other modules. They provide a
 * complete set of kernel non-blocking support for message
 * queueing.
 */
__netlink_kernel_create(struct net *net, int unit, struct module *module,
			struct netlink_kernel_cfg *cfg)
	struct socket *sock;
	struct netlink_sock *nlk;
	struct listeners *listeners = NULL;
	struct mutex *cb_mutex = cfg ? cfg->cb_mutex : NULL;
	unsigned int groups;

	if (unit < 0 || unit >= MAX_LINKS)

	if (sock_create_lite(PF_NETLINK, SOCK_DGRAM, unit, &sock))
	/*
	 * We have to just have a reference on the net from sk, but don't
	 * get_net it. Besides, we cannot get and then put the net here.
	 * So we create one inside init_net and then move it to net.
	 */
	if (__netlink_create(&init_net, sock, cb_mutex, unit) < 0)
		goto out_sock_release_nosk;

	sk_change_net(sk, net);

	if (!cfg || cfg->groups < 32)
		groups = cfg->groups;

	listeners = kzalloc(sizeof(*listeners) + NLGRPSZ(groups), GFP_KERNEL);
		goto out_sock_release;

	sk->sk_data_ready = netlink_data_ready;
	if (cfg && cfg->input)
		nlk_sk(sk)->netlink_rcv = cfg->input;

	if (netlink_insert(sk, net, 0))
		goto out_sock_release;

	nlk->flags |= NETLINK_KERNEL_SOCKET;

	netlink_table_grab();
	if (!nl_table[unit].registered) {
		nl_table[unit].groups = groups;
		rcu_assign_pointer(nl_table[unit].listeners, listeners);
		nl_table[unit].cb_mutex = cb_mutex;
		nl_table[unit].module = module;
			nl_table[unit].bind = cfg->bind;
			nl_table[unit].unbind = cfg->unbind;
			nl_table[unit].flags = cfg->flags;
			nl_table[unit].compare = cfg->compare;
		nl_table[unit].registered = 1;
		nl_table[unit].registered++;
	netlink_table_ungrab();

	netlink_kernel_release(sk);

out_sock_release_nosk:
EXPORT_SYMBOL(__netlink_kernel_create);

netlink_kernel_release(struct sock *sk)
	sk_release_kernel(sk);
EXPORT_SYMBOL(netlink_kernel_release);
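/* Usage sketch (illustrative, not part of this file): a subsystem
 * creates its kernel-side socket through the netlink_kernel_create()
 * wrapper from <linux/netlink.h>, which passes THIS_MODULE to
 * __netlink_kernel_create() above:
 *
 *	static void my_input(struct sk_buff *skb)
 *	{
 *		...runs for every message sent to this socket...
 *	}
 *
 *	struct netlink_kernel_cfg cfg = {
 *		.groups = 32,
 *		.input  = my_input,
 *	};
 *	struct sock *nlsk = netlink_kernel_create(net, NETLINK_USERSOCK, &cfg);
 *
 *	if (!nlsk)
 *		return -ENOMEM;
 *	...
 *	netlink_kernel_release(nlsk);
 *
 * my_input is a placeholder, and NETLINK_USERSOCK merely stands in for
 * the subsystem's own protocol number.
 */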
int __netlink_change_ngroups(struct sock *sk, unsigned int groups)
	struct listeners *new, *old;
	struct netlink_table *tbl = &nl_table[sk->sk_protocol];

	if (NLGRPSZ(tbl->groups) < NLGRPSZ(groups)) {
		new = kzalloc(sizeof(*new) + NLGRPSZ(groups), GFP_ATOMIC);
		old = nl_deref_protected(tbl->listeners);
		memcpy(new->masks, old->masks, NLGRPSZ(tbl->groups));
		rcu_assign_pointer(tbl->listeners, new);

		kfree_rcu(old, rcu);
	tbl->groups = groups;

/**
 * netlink_change_ngroups - change number of multicast groups
 *
 * This changes the number of multicast groups that are available
 * on a certain netlink family. Note that it is not possible to
 * change the number of groups to below 32. Also note that it does
 * not implicitly call netlink_clear_multicast_users() when the
 * number of groups is reduced.
 *
 * @sk: The kernel netlink socket, as returned by netlink_kernel_create().
 * @groups: The new number of groups.
 */
int netlink_change_ngroups(struct sock *sk, unsigned int groups)
	netlink_table_grab();
	err = __netlink_change_ngroups(sk, groups);
	netlink_table_ungrab();

void __netlink_clear_multicast_users(struct sock *ksk, unsigned int group)
	struct netlink_table *tbl = &nl_table[ksk->sk_protocol];

	sk_for_each_bound(sk, &tbl->mc_list)
		netlink_update_socket_mc(nlk_sk(sk), group, 0);

__nlmsg_put(struct sk_buff *skb, u32 portid, u32 seq, int type, int len, int flags)
	struct nlmsghdr *nlh;
	int size = nlmsg_msg_size(len);

	nlh = (struct nlmsghdr *)skb_put(skb, NLMSG_ALIGN(size));
	nlh->nlmsg_type = type;
	nlh->nlmsg_len = size;
	nlh->nlmsg_flags = flags;
	nlh->nlmsg_pid = portid;
	nlh->nlmsg_seq = seq;
	if (!__builtin_constant_p(size) || NLMSG_ALIGN(size) - size != 0)
		memset(nlmsg_data(nlh) + len, 0, NLMSG_ALIGN(size) - size);
EXPORT_SYMBOL(__nlmsg_put);
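/* Usage sketch (illustrative): __nlmsg_put() is normally reached through
 * the nlmsg_put() wrapper in <net/netlink.h> when composing a message:
 *
 *	struct nlmsghdr *nlh;
 *
 *	nlh = nlmsg_put(skb, portid, seq, MY_MSG_TYPE,
 *			sizeof(struct my_payload), NLM_F_MULTI);
 *	if (!nlh)
 *		return -EMSGSIZE;	(skb tailroom exhausted)
 *	memcpy(nlmsg_data(nlh), &payload, sizeof(payload));
 *	nlmsg_end(skb, nlh);
 *
 * MY_MSG_TYPE and struct my_payload are placeholders.
 */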
/*
 * It looks a bit ugly.
 * It would be better to create a kernel thread.
 */
static int netlink_dump(struct sock *sk)
	struct netlink_sock *nlk = nlk_sk(sk);
	struct netlink_callback *cb;
	struct sk_buff *skb = NULL;
	struct nlmsghdr *nlh;
	int len, err = -ENOBUFS;

	mutex_lock(nlk->cb_mutex);
	if (!nlk->cb_running) {

	alloc_size = max_t(int, cb->min_dump_alloc, NLMSG_GOODSIZE);

	if (!netlink_rx_is_mmaped(sk) &&
	    atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf)
	/* NLMSG_GOODSIZE is small to avoid high order allocations being
	 * required, but it makes sense to _attempt_ a 16K bytes allocation
	 * to reduce the number of system calls on dump operations, if the
	 * user ever provided a big enough buffer.
	 */
	if (alloc_size < nlk->max_recvmsg_len) {
		skb = netlink_alloc_skb(sk,
					nlk->max_recvmsg_len,

		/* available room should be exact amount to avoid MSG_TRUNC */
			skb_reserve(skb, skb_tailroom(skb) -
					 nlk->max_recvmsg_len);
		skb = netlink_alloc_skb(sk, alloc_size, nlk->portid,

	netlink_skb_set_owner_r(skb, sk);

	len = cb->dump(skb, cb);

		mutex_unlock(nlk->cb_mutex);

		if (sk_filter(sk, skb))
			kfree_skb(skb);
		else
			__netlink_sendskb(sk, skb);

	nlh = nlmsg_put_answer(skb, cb, NLMSG_DONE, sizeof(len), NLM_F_MULTI);

	nl_dump_check_consistent(cb, nlh);

	memcpy(nlmsg_data(nlh), &len, sizeof(len));

	if (sk_filter(sk, skb))
		kfree_skb(skb);
	else
		__netlink_sendskb(sk, skb);

	nlk->cb_running = false;
	mutex_unlock(nlk->cb_mutex);
	module_put(cb->module);
	consume_skb(cb->skb);

	mutex_unlock(nlk->cb_mutex);

int __netlink_dump_start(struct sock *ssk, struct sk_buff *skb,
			 const struct nlmsghdr *nlh,
			 struct netlink_dump_control *control)
	struct netlink_callback *cb;
	struct netlink_sock *nlk;
2702 /* Memory mapped dump requests need to be copied to avoid looping
2703 * on the pending state in netlink_mmap_sendmsg() while the CB hold
2704 * a reference to the skb.
2706 if (netlink_skb_is_mmaped(skb)) {
2707 skb = skb_copy(skb, GFP_KERNEL);
2711 atomic_inc(&skb->users);
2713 sk = netlink_lookup(sock_net(ssk), ssk->sk_protocol, NETLINK_CB(skb).portid);
2715 ret = -ECONNREFUSED;
2720 mutex_lock(nlk->cb_mutex);
2721 /* A dump is in progress... */
2722 if (nlk->cb_running) {
2726 /* add reference of module which cb->dump belongs to */
2727 if (!try_module_get(control->module)) {
2728 ret = -EPROTONOSUPPORT;
2733 memset(cb, 0, sizeof(*cb));
2734 cb->dump = control->dump;
2735 cb->done = control->done;
2737 cb->data = control->data;
2738 cb->module = control->module;
2739 cb->min_dump_alloc = control->min_dump_alloc;
2742 nlk->cb_running = true;
2744 mutex_unlock(nlk->cb_mutex);
2746 ret = netlink_dump(sk);
2752 /* We successfully started a dump, by returning -EINTR we
2753 * signal not to send ACK even if it was requested.
2759 mutex_unlock(nlk->cb_mutex);
2764 EXPORT_SYMBOL(__netlink_dump_start);
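
/* Illustrative sketch (not part of the original file): request handlers
 * normally enter through the netlink_dump_start() wrapper from
 * <linux/netlink.h>. Returning its result upward lets the -EINTR
 * convention above suppress the automatic ACK for a dump that started
 * successfully. example_dump() is an assumed callback.
 */
#if 0
static int example_req_handler(struct sk_buff *skb, struct nlmsghdr *nlh)
{
	if (nlh->nlmsg_flags & NLM_F_DUMP) {
		struct netlink_dump_control c = {
			.dump		= example_dump,
			.min_dump_alloc	= 1024,
		};
		/* skb->sk is the kernel socket in this receive path */
		return netlink_dump_start(skb->sk, skb, nlh, &c);
	}
	return -EOPNOTSUPP;
}
#endif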
void netlink_ack(struct sk_buff *in_skb, struct nlmsghdr *nlh, int err)
{
	struct sk_buff *skb;
	struct nlmsghdr *rep;
	struct nlmsgerr *errmsg;
	size_t payload = sizeof(*errmsg);

	/* error messages get the original request appended */
	if (err)
		payload += nlmsg_len(nlh);

	skb = netlink_alloc_skb(in_skb->sk, nlmsg_total_size(payload),
				NETLINK_CB(in_skb).portid, GFP_KERNEL);
	if (!skb) {
		struct sock *sk;

		sk = netlink_lookup(sock_net(in_skb->sk),
				    in_skb->sk->sk_protocol,
				    NETLINK_CB(in_skb).portid);
		if (sk) {
			sk->sk_err = ENOBUFS;
			sk->sk_error_report(sk);
			sock_put(sk);
		}
		return;
	}

	rep = __nlmsg_put(skb, NETLINK_CB(in_skb).portid, nlh->nlmsg_seq,
			  NLMSG_ERROR, payload, 0);
	errmsg = nlmsg_data(rep);
	errmsg->error = err;
	memcpy(&errmsg->msg, nlh, err ? nlh->nlmsg_len : sizeof(*nlh));
	netlink_unicast(in_skb->sk, skb, NETLINK_CB(in_skb).portid, MSG_DONTWAIT);
}
EXPORT_SYMBOL(netlink_ack);
int netlink_rcv_skb(struct sk_buff *skb, int (*cb)(struct sk_buff *,
						   struct nlmsghdr *))
{
	struct nlmsghdr *nlh;
	int err;

	while (skb->len >= nlmsg_total_size(0)) {
		int msglen;

		nlh = nlmsg_hdr(skb);
		err = 0;
		if (nlh->nlmsg_len < NLMSG_HDRLEN || skb->len < nlh->nlmsg_len)
			return 0;
		/* Only requests are handled by the kernel */
		if (!(nlh->nlmsg_flags & NLM_F_REQUEST))
			goto ack;
		/* Skip control messages */
		if (nlh->nlmsg_type < NLMSG_MIN_TYPE)
			goto ack;
		err = cb(skb, nlh);
		if (err == -EINTR)
			goto skip;
ack:
		if (nlh->nlmsg_flags & NLM_F_ACK || err)
			netlink_ack(skb, nlh, err);
skip:
		msglen = NLMSG_ALIGN(nlh->nlmsg_len);
		if (msglen > skb->len)
			msglen = skb->len;
		skb_pull(skb, msglen);
	}
	return 0;
}
EXPORT_SYMBOL(netlink_rcv_skb);
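
/* Illustrative sketch (not part of the original file): kernel-side users
 * normally set netlink_rcv_skb() up as the ->input callback of their
 * netlink_kernel_cfg, so each well-formed request is dispatched to a
 * per-message handler and ACKed according to the rules above. All
 * "example_*" names are assumptions.
 */
#if 0
static int example_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
{
	/* dispatch on nlh->nlmsg_type here */
	return 0;
}

static void example_netlink_rcv(struct sk_buff *skb)
{
	netlink_rcv_skb(skb, &example_rcv_msg);
}

static struct netlink_kernel_cfg example_cfg = {
	.input	= example_netlink_rcv,
};
/* sk = netlink_kernel_create(&init_net, <protocol>, &example_cfg); */
#endif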
/**
 * nlmsg_notify - send a notification netlink message
 * @sk: netlink socket to use
 * @skb: notification message
 * @portid: destination netlink portid for reports or 0
 * @group: destination multicast group or 0
 * @report: 1 to report back, 0 to disable
 * @flags: allocation flags
 */
int nlmsg_notify(struct sock *sk, struct sk_buff *skb, u32 portid,
		 unsigned int group, int report, gfp_t flags)
{
	int err = 0;

	if (group) {
		int exclude_portid = 0;

		if (report) {
			atomic_inc(&skb->users);
			exclude_portid = portid;
		}
		/* errors are reported via the destination sk->sk_err, but
		 * delivery errors propagate if NETLINK_BROADCAST_ERROR is set */
		err = nlmsg_multicast(sk, skb, exclude_portid, group, flags);
	}

	if (report) {
		int err2;

		err2 = nlmsg_unicast(sk, skb, portid);
		if (!err || err == -ESRCH)
			err = err2;
	}

	return err;
}
EXPORT_SYMBOL(nlmsg_notify);
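
/* Illustrative sketch (not part of the original file): a typical event
 * path builds the notification skb and lets nlmsg_notify() handle the
 * multicast/unicast split implemented above. The group number and the
 * echo convention are assumptions.
 */
#if 0
static int example_send_event(struct sock *sk, struct sk_buff *skb,
			      u32 requester_portid, bool echo)
{
	/* broadcast to group 1; also unicast back to the requester
	 * if it asked for an echo of its own change */
	return nlmsg_notify(sk, skb, requester_portid, 1, echo, GFP_KERNEL);
}
#endif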
#ifdef CONFIG_PROC_FS
struct nl_seq_iter {
	struct seq_net_private p;
	int link;
	int hash_idx;
};

static struct sock *netlink_seq_socket_idx(struct seq_file *seq, loff_t pos)
{
	struct nl_seq_iter *iter = seq->private;
	struct netlink_sock *nlk;
	struct sock *s;
	loff_t off = 0;
	int i, j;

	for (i = 0; i < MAX_LINKS; i++) {
		struct rhashtable *ht = &nl_table[i].hash;
		const struct bucket_table *tbl = rht_dereference_rcu(ht->tbl, ht);

		for (j = 0; j < tbl->size; j++) {
			rht_for_each_entry_rcu(nlk, tbl->buckets[j], node) {
				s = (struct sock *)nlk;
				if (sock_net(s) != seq_file_net(seq))
					continue;
				if (off++ == pos) {
					iter->link = i;
					iter->hash_idx = j;
					return s;
				}
			}
		}
	}
	return NULL;
}
static void *netlink_seq_start(struct seq_file *seq, loff_t *pos)
	__acquires(nl_table_lock) __acquires(RCU)
{
	read_lock(&nl_table_lock);
	rcu_read_lock();

	return *pos ? netlink_seq_socket_idx(seq, *pos - 1) : SEQ_START_TOKEN;
}
static void *netlink_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct rhashtable *ht;
	struct netlink_sock *nlk;
	struct nl_seq_iter *iter;
	struct net *net;
	int i, j;

	++*pos;
	if (v == SEQ_START_TOKEN)
		return netlink_seq_socket_idx(seq, 0);

	net = seq_file_net(seq);
	iter = seq->private;
	nlk = v;

	i = iter->link;
	ht = &nl_table[i].hash;
	rht_for_each_entry(nlk, nlk->node.next, ht, node)
		if (net_eq(sock_net((struct sock *)nlk), net))
			return nlk;

	j = iter->hash_idx + 1;
	do {
		const struct bucket_table *tbl = rht_dereference_rcu(ht->tbl, ht);

		for (; j < tbl->size; j++) {
			rht_for_each_entry(nlk, tbl->buckets[j], ht, node) {
				if (net_eq(sock_net((struct sock *)nlk), net)) {
					iter->link = i;
					iter->hash_idx = j;
					return nlk;
				}
			}
		}
		j = 0;
	} while (++i < MAX_LINKS);

	return NULL;
}
static void netlink_seq_stop(struct seq_file *seq, void *v)
	__releases(RCU) __releases(nl_table_lock)
{
	rcu_read_unlock();
	read_unlock(&nl_table_lock);
}
static int netlink_seq_show(struct seq_file *seq, void *v)
{
	if (v == SEQ_START_TOKEN) {
		seq_puts(seq,
			 "sk       Eth Pid    Groups   "
			 "Rmem     Wmem     Dump     Locks     Drops     Inode\n");
	} else {
		struct sock *s = v;
		struct netlink_sock *nlk = nlk_sk(s);

		seq_printf(seq, "%pK %-3d %-6u %08x %-8d %-8d %d %-8d %-8d %-8lu\n",
			   s,
			   s->sk_protocol,
			   nlk->portid,
			   nlk->groups ? (u32)nlk->groups[0] : 0,
			   sk_rmem_alloc_get(s),
			   sk_wmem_alloc_get(s),
			   nlk->cb_running,
			   atomic_read(&s->sk_refcnt),
			   atomic_read(&s->sk_drops),
			   sock_i_ino(s)
			);
	}
	return 0;
}
static const struct seq_operations netlink_seq_ops = {
	.start  = netlink_seq_start,
	.next   = netlink_seq_next,
	.stop   = netlink_seq_stop,
	.show   = netlink_seq_show,
};

static int netlink_seq_open(struct inode *inode, struct file *file)
{
	return seq_open_net(inode, file, &netlink_seq_ops,
			    sizeof(struct nl_seq_iter));
}

static const struct file_operations netlink_seq_fops = {
	.owner		= THIS_MODULE,
	.open		= netlink_seq_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release_net,
};

#endif
int netlink_register_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_register(&netlink_chain, nb);
}
EXPORT_SYMBOL(netlink_register_notifier);

int netlink_unregister_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_unregister(&netlink_chain, nb);
}
EXPORT_SYMBOL(netlink_unregister_notifier);
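
/* Illustrative sketch (not part of the original file): a subscriber
 * receives a struct netlink_notify when a netlink socket is released
 * (NETLINK_URELEASE), which is how kernel users learn that a peer's
 * portid has gone away. All "example_*" names are assumptions.
 */
#if 0
static int example_netlink_event(struct notifier_block *nb,
				 unsigned long state, void *ptr)
{
	struct netlink_notify *n = ptr;

	if (state == NETLINK_URELEASE)
		pr_debug("portid %d on protocol %d released\n",
			 n->portid, n->protocol);
	return NOTIFY_DONE;
}

static struct notifier_block example_netlink_notifier = {
	.notifier_call = example_netlink_event,
};
/* registration, e.g. from module init:
 * netlink_register_notifier(&example_netlink_notifier);
 */
#endif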
static const struct proto_ops netlink_ops = {
	.family =	PF_NETLINK,
	.owner =	THIS_MODULE,
	.release =	netlink_release,
	.bind =		netlink_bind,
	.connect =	netlink_connect,
	.socketpair =	sock_no_socketpair,
	.accept =	sock_no_accept,
	.getname =	netlink_getname,
	.poll =		netlink_poll,
	.ioctl =	sock_no_ioctl,
	.listen =	sock_no_listen,
	.shutdown =	sock_no_shutdown,
	.setsockopt =	netlink_setsockopt,
	.getsockopt =	netlink_getsockopt,
	.sendmsg =	netlink_sendmsg,
	.recvmsg =	netlink_recvmsg,
	.mmap =		netlink_mmap,
	.sendpage =	sock_no_sendpage,
};

static const struct net_proto_family netlink_family_ops = {
	.family = PF_NETLINK,
	.create = netlink_create,
	.owner	= THIS_MODULE,	/* for consistency 8) */
};
static int __net_init netlink_net_init(struct net *net)
{
#ifdef CONFIG_PROC_FS
	if (!proc_create("netlink", 0, net->proc_net, &netlink_seq_fops))
		return -ENOMEM;
#endif
	return 0;
}

static void __net_exit netlink_net_exit(struct net *net)
{
#ifdef CONFIG_PROC_FS
	remove_proc_entry("netlink", net->proc_net);
#endif
}
static void __init netlink_add_usersock_entry(void)
{
	struct listeners *listeners;
	int groups = 32;

	listeners = kzalloc(sizeof(*listeners) + NLGRPSZ(groups), GFP_KERNEL);
	if (!listeners)
		panic("netlink_add_usersock_entry: Cannot allocate listeners\n");

	netlink_table_grab();

	nl_table[NETLINK_USERSOCK].groups = groups;
	rcu_assign_pointer(nl_table[NETLINK_USERSOCK].listeners, listeners);
	nl_table[NETLINK_USERSOCK].module = THIS_MODULE;
	nl_table[NETLINK_USERSOCK].registered = 1;
	nl_table[NETLINK_USERSOCK].flags = NL_CFG_F_NONROOT_SEND;

	netlink_table_ungrab();
}
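
/* Illustrative sketch (not part of the original file): the userspace
 * counterpart of the entry registered above. Because NETLINK_USERSOCK is
 * created with NL_CFG_F_NONROOT_SEND, an unprivileged process may unicast
 * to another user process's portid (group subscription still requires
 * privilege). Userspace code, shown here only for context.
 */
#if 0
int fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_USERSOCK);
struct sockaddr_nl addr = { .nl_family = AF_NETLINK };	/* kernel assigns portid */
bind(fd, (struct sockaddr *)&addr, sizeof(addr));
#endif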
static struct pernet_operations __net_initdata netlink_net_ops = {
	.init = netlink_net_init,
	.exit = netlink_net_exit,
};
static int __init netlink_proto_init(void)
{
	int i;
	int err = proto_register(&netlink_proto, 0);
	struct rhashtable_params ht_params = {
		.head_offset = offsetof(struct netlink_sock, node),
		.key_offset = offsetof(struct netlink_sock, portid),
		.key_len = sizeof(u32), /* portid */
		.hashfn = jhash,
		.max_shift = 16, /* 64K */
		.grow_decision = rht_grow_above_75,
		.shrink_decision = rht_shrink_below_30,
#ifdef CONFIG_PROVE_LOCKING
		.mutex_is_held = lockdep_nl_sk_hash_is_held,
#endif
	};

	if (err != 0)
		goto out;

	BUILD_BUG_ON(sizeof(struct netlink_skb_parms) > FIELD_SIZEOF(struct sk_buff, cb));

	nl_table = kcalloc(MAX_LINKS, sizeof(*nl_table), GFP_KERNEL);
	if (!nl_table)
		goto panic;

	for (i = 0; i < MAX_LINKS; i++) {
		if (rhashtable_init(&nl_table[i].hash, &ht_params) < 0) {
			while (--i >= 0)
				rhashtable_destroy(&nl_table[i].hash);
			kfree(nl_table);
			goto panic;
		}
	}

	INIT_LIST_HEAD(&netlink_tap_all);
	netlink_add_usersock_entry();

	sock_register(&netlink_family_ops);
	register_pernet_subsys(&netlink_net_ops);
	/* The netlink device handler may be needed early. */
	rtnetlink_init();
out:
	return err;
panic:
	panic("netlink_init: Cannot allocate nl_table\n");
}

core_initcall(netlink_proto_init);