/*
 * NET4:	Implementation of BSD Unix domain sockets.
 *
 * Authors:	Alan Cox, <alan@lxorguk.ukuu.org.uk>
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 *		Linus Torvalds	:	Assorted bug cures.
 *		Niibe Yutaka	:	async I/O support.
 *		Carsten Paeth	:	PF_UNIX check, address fixes.
 *		Alan Cox	:	Limit size of allocated blocks.
 *		Alan Cox	:	Fixed the stupid socketpair bug.
 *		Alan Cox	:	BSD compatibility fine tuning.
 *		Alan Cox	:	Fixed a bug in connect when interrupted.
 *		Alan Cox	:	Sorted out a proper draft version of
 *					file descriptor passing hacked up from
 *					BSD code.
 *		Marty Leisner	:	Fixes to fd passing.
 *		Nick Nevin	:	recvmsg bugfix.
 *		Alan Cox	:	Started proper garbage collector.
 *		Heiko Eißfeldt	:	Missing verify_area check.
 *		Alan Cox	:	Started POSIXisms.
 *		Andreas Schwab	:	Replace inode by dentry for proper
 *					reference counting.
 *		Kirk Petersen	:	Made this a module.
 *		Christoph Rohland :	Elegant non-blocking accept/connect
 *					algorithm.
 *		Alexey Kuznetsov :	Repaired (I hope) bugs introduced
 *					by the above two patches.
 *		Andrea Arcangeli :	If possible we block in connect(2)
 *					if the max backlog of the listening
 *					socket has been reached. This won't
 *					break old apps and it avoids a huge
 *					number of hashed socks (done for
 *					unix_gc() performance reasons).
 *					Security fix that limits the max
 *					number of socks to 2*max_files and
 *					the number of skbs queueable in the
 *					dgram receiver.
 *		Artur Skawina	:	Hash function optimizations.
 *		Alexey Kuznetsov :	Full scale SMP. Lots of bugs are
 *					introduced 8)
 *		Malcolm Beattie	:	Set peercred for socketpair.
 *		Michal Ostrowski :	Module initialization cleanup.
 *		Arnaldo C. Melo	:	Remove MOD_{INC,DEC}_USE_COUNT,
 *					the core infrastructure is doing that
 *					for all net proto families now
 *					(2.5.69+)
 *
 * Known differences from reference BSD that was tested:
 *
 *	ECONNREFUSED is not returned from one end of a connected() socket to the
 *		other the moment one end closes.
 *	fstat() doesn't return st_dev=0, and gives the blksize as the high water
 *		mark and a fake inode identifier (nor the BSD first-socket-fstat-twice
 *		bug).
 *	accept() returns a path name even if the connecting socket has closed
 *		in the meantime (BSD loses the path and gives up).
 *	accept() returns a 0 length path for an unbound connector. BSD returns 16
 *		and a null first byte in the path (but not for gethost/peername -
 *		BSD bug??)
 *	socketpair(...SOCK_RAW..) doesn't panic the kernel.
 *	BSD af_unix apparently has connect forgetting to block properly.
 *		(need to check this with the POSIX spec in detail)
 *
 * Differences from 2.0.0-11-... (ANK)
 *	Bug fixes and improvements.
 *		- client shutdown killed server socket.
 *		- removed all useless cli/sti pairs.
 *
 *	Semantic changes/extensions.
 *		- generic control message passing.
 *		- SCM_CREDENTIALS control message.
 *		- "Abstract" (not FS-based) socket bindings.
 *		  Abstract names are sequences of bytes (not zero terminated)
 *		  started by 0, so that this name space does not intersect
 *		  with BSD names.
 */
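
/*
 * Illustrative userspace sketch (not part of the kernel code): binding
 * a socket in the abstract namespace described above. The name "demo"
 * and the helper name are invented for the example; the address length
 * covers only the bytes actually used, because abstract names are not
 * zero-terminated:
 *
 *	#include <stddef.h>
 *	#include <string.h>
 *	#include <sys/socket.h>
 *	#include <sys/un.h>
 *	#include <unistd.h>
 *
 *	static int bind_abstract(void)
 *	{
 *		struct sockaddr_un sun;
 *		socklen_t len;
 *		int fd = socket(AF_UNIX, SOCK_STREAM, 0);
 *
 *		if (fd < 0)
 *			return -1;
 *		memset(&sun, 0, sizeof(sun));
 *		sun.sun_family = AF_UNIX;
 *		sun.sun_path[0] = '\0';			/* abstract namespace */
 *		memcpy(sun.sun_path + 1, "demo", 4);	/* name bytes, no NUL */
 *		len = offsetof(struct sockaddr_un, sun_path) + 1 + 4;
 *		if (bind(fd, (struct sockaddr *)&sun, len) < 0) {
 *			close(fd);
 *			return -1;
 *		}
 *		return fd;
 *	}
 */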
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/stat.h>
#include <linux/dcache.h>
#include <linux/namei.h>
#include <linux/socket.h>
#include <linux/fcntl.h>
#include <linux/termios.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/slab.h>
#include <asm/uaccess.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <net/tcp_states.h>
#include <net/af_unix.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/init.h>
#include <linux/poll.h>
#include <linux/rtnetlink.h>
#include <linux/mount.h>
#include <net/checksum.h>
#include <linux/security.h>

struct hlist_head unix_socket_table[2 * UNIX_HASH_SIZE];
EXPORT_SYMBOL_GPL(unix_socket_table);
DEFINE_SPINLOCK(unix_table_lock);
EXPORT_SYMBOL_GPL(unix_table_lock);
static atomic_long_t unix_nr_socks;

static struct hlist_head *unix_sockets_unbound(void *addr)
	unsigned long hash = (unsigned long)addr;

	hash %= UNIX_HASH_SIZE;
	return &unix_socket_table[UNIX_HASH_SIZE + hash];

#define UNIX_ABSTRACT(sk)	(unix_sk(sk)->addr->hash < UNIX_HASH_SIZE)

#ifdef CONFIG_SECURITY_NETWORK
static void unix_get_secdata(struct scm_cookie *scm, struct sk_buff *skb)
	memcpy(UNIXSID(skb), &scm->secid, sizeof(u32));

static inline void unix_set_secdata(struct scm_cookie *scm, struct sk_buff *skb)
	scm->secid = *UNIXSID(skb);
#else
static inline void unix_get_secdata(struct scm_cookie *scm, struct sk_buff *skb)

static inline void unix_set_secdata(struct scm_cookie *scm, struct sk_buff *skb)

#endif /* CONFIG_SECURITY_NETWORK */
/*
 * SMP locking strategy:
 *    the hash table is protected with the spinlock unix_table_lock;
 *    each socket's state is protected by a separate spinlock.
 */
static inline unsigned int unix_hash_fold(__wsum n)
	unsigned int hash = (__force unsigned int)csum_fold(n);

	return hash & (UNIX_HASH_SIZE - 1);

#define unix_peer(sk) (unix_sk(sk)->peer)

static inline int unix_our_peer(struct sock *sk, struct sock *osk)
	return unix_peer(osk) == sk;

static inline int unix_may_send(struct sock *sk, struct sock *osk)
	return unix_peer(osk) == NULL || unix_our_peer(sk, osk);

static inline int unix_recvq_full(struct sock const *sk)
	return skb_queue_len(&sk->sk_receive_queue) > sk->sk_max_ack_backlog;

struct sock *unix_peer_get(struct sock *s)
	unix_state_unlock(s);

EXPORT_SYMBOL_GPL(unix_peer_get);
static inline void unix_release_addr(struct unix_address *addr)
	if (atomic_dec_and_test(&addr->refcnt))

/*
 *	Check unix socket name:
 *		- should not be zero length.
 *		- if it does not start with a zero byte, it should be
 *		  NUL terminated (an FS object)
 *		- if it starts with a zero byte, it is an abstract name.
 */
static int unix_mkname(struct sockaddr_un *sunaddr, int len, unsigned int *hashp)
	if (len <= sizeof(short) || len > sizeof(*sunaddr))
	if (!sunaddr || sunaddr->sun_family != AF_UNIX)
	if (sunaddr->sun_path[0]) {
		/*
		 * This may look like an off-by-one error but it is a bit more
		 * subtle. 108 is the longest valid AF_UNIX path for a binding.
		 * sun_path[108] doesn't exist as such. However, in kernel space
		 * we are guaranteed that it is a valid memory location in our
		 * kernel address buffer.
		 */
		((char *)sunaddr)[len] = 0;
		len = strlen(sunaddr->sun_path)+1+sizeof(short);

	*hashp = unix_hash_fold(csum_partial(sunaddr, len, 0));
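
/*
 * Illustrative userspace sketch: for filesystem names, unix_mkname()
 * above re-derives the length as strlen(sun_path) + 1 + sizeof(short),
 * which matches the usual portable formula (on Linux,
 * offsetof(struct sockaddr_un, sun_path) == sizeof(sa_family_t)). The
 * helper name and the "path" parameter are invented for the example:
 *
 *	#include <stddef.h>
 *	#include <string.h>
 *	#include <sys/socket.h>
 *	#include <sys/un.h>
 *
 *	static socklen_t unix_addr_len(struct sockaddr_un *sun,
 *				       const char *path)
 *	{
 *		memset(sun, 0, sizeof(*sun));
 *		sun->sun_family = AF_UNIX;
 *		strncpy(sun->sun_path, path, sizeof(sun->sun_path) - 1);
 *		return offsetof(struct sockaddr_un, sun_path) +
 *		       strlen(sun->sun_path) + 1;
 *	}
 */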

static void __unix_remove_socket(struct sock *sk)
	sk_del_node_init(sk);

static void __unix_insert_socket(struct hlist_head *list, struct sock *sk)
	WARN_ON(!sk_unhashed(sk));
	sk_add_node(sk, list);

static inline void unix_remove_socket(struct sock *sk)
	spin_lock(&unix_table_lock);
	__unix_remove_socket(sk);
	spin_unlock(&unix_table_lock);

static inline void unix_insert_socket(struct hlist_head *list, struct sock *sk)
	spin_lock(&unix_table_lock);
	__unix_insert_socket(list, sk);
	spin_unlock(&unix_table_lock);

static struct sock *__unix_find_socket_byname(struct net *net,
					      struct sockaddr_un *sunname,
					      int len, int type, unsigned int hash)
	sk_for_each(s, &unix_socket_table[hash ^ type]) {
		struct unix_sock *u = unix_sk(s);

		if (!net_eq(sock_net(s), net))

		if (u->addr->len == len &&
		    !memcmp(u->addr->name, sunname, len))

static inline struct sock *unix_find_socket_byname(struct net *net,
						   struct sockaddr_un *sunname,
	spin_lock(&unix_table_lock);
	s = __unix_find_socket_byname(net, sunname, len, type, hash);
	spin_unlock(&unix_table_lock);

static struct sock *unix_find_socket_byinode(struct inode *i)
	spin_lock(&unix_table_lock);
		     &unix_socket_table[i->i_ino & (UNIX_HASH_SIZE - 1)]) {
		struct dentry *dentry = unix_sk(s)->path.dentry;

		if (dentry && dentry->d_inode == i) {
	spin_unlock(&unix_table_lock);

static inline int unix_writable(struct sock *sk)
	return (atomic_read(&sk->sk_wmem_alloc) << 2) <= sk->sk_sndbuf;

static void unix_write_space(struct sock *sk)
	struct socket_wq *wq;

	if (unix_writable(sk)) {
		wq = rcu_dereference(sk->sk_wq);
		if (wq_has_sleeper(wq))
			wake_up_interruptible_sync_poll(&wq->wait,
				POLLOUT | POLLWRNORM | POLLWRBAND);
		sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);

/* When a dgram socket disconnects (or changes its peer), we clear its receive
 * queue of packets that arrived from the previous peer. First, this allows
 * flow control based only on wmem_alloc; second, an sk connected to a peer
 * may receive messages only from that peer. */
static void unix_dgram_disconnected(struct sock *sk, struct sock *other)
	if (!skb_queue_empty(&sk->sk_receive_queue)) {
		skb_queue_purge(&sk->sk_receive_queue);
		wake_up_interruptible_all(&unix_sk(sk)->peer_wait);

	/* If one link of a bidirectional dgram pipe is disconnected,
	 * we signal an error. Messages are lost. Do not do this when
	 * the peer was not connected to us.
	 */
	if (!sock_flag(other, SOCK_DEAD) && unix_peer(other) == sk) {
		other->sk_err = ECONNRESET;
		other->sk_error_report(other);

static void unix_sock_destructor(struct sock *sk)
	struct unix_sock *u = unix_sk(sk);

	skb_queue_purge(&sk->sk_receive_queue);

	WARN_ON(atomic_read(&sk->sk_wmem_alloc));
	WARN_ON(!sk_unhashed(sk));
	WARN_ON(sk->sk_socket);
	if (!sock_flag(sk, SOCK_DEAD)) {
		printk(KERN_INFO "Attempt to release alive unix socket: %p\n", sk);

		unix_release_addr(u->addr);

	atomic_long_dec(&unix_nr_socks);
	sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
#ifdef UNIX_REFCNT_DEBUG
	printk(KERN_DEBUG "UNIX %p is destroyed, %ld are still alive.\n", sk,
		atomic_long_read(&unix_nr_socks));

static void unix_release_sock(struct sock *sk, int embrion)
	struct unix_sock *u = unix_sk(sk);

	unix_remove_socket(sk);

	sk->sk_shutdown = SHUTDOWN_MASK;
	u->path.dentry	= NULL;
	state = sk->sk_state;
	sk->sk_state = TCP_CLOSE;
	unix_state_unlock(sk);

	wake_up_interruptible_all(&u->peer_wait);

	skpair = unix_peer(sk);

	if (skpair != NULL) {
		if (sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET) {
			unix_state_lock(skpair);
			skpair->sk_shutdown = SHUTDOWN_MASK;
			if (!skb_queue_empty(&sk->sk_receive_queue) || embrion)
				skpair->sk_err = ECONNRESET;
			unix_state_unlock(skpair);
			skpair->sk_state_change(skpair);
			sk_wake_async(skpair, SOCK_WAKE_WAITD, POLL_HUP);
		sock_put(skpair); /* It may now die */
		unix_peer(sk) = NULL;

	/* Try to flush out this socket. Throw out buffers at least */
	while ((skb = skb_dequeue(&sk->sk_receive_queue)) != NULL) {
		if (state == TCP_LISTEN)
			unix_release_sock(skb->sk, 1);
		/* passed fds are erased in the kfree_skb hook */

	/* ---- Socket is dead now and most probably destroyed ---- */

	/*
	 * Fixme: BSD difference: In BSD all sockets connected to us get
	 *	  ECONNRESET and we die on the spot. In Linux we behave
	 *	  like files and pipes do and wait for the last
	 *	  dereference.
	 *
	 * Can't we simply set sock->err?
	 *
	 * What does the above comment talk about? --ANK(980817)
	 */

	if (unix_tot_inflight)
		unix_gc();		/* Garbage collect fds */

static void init_peercred(struct sock *sk)
	put_pid(sk->sk_peer_pid);
	if (sk->sk_peer_cred)
		put_cred(sk->sk_peer_cred);
	sk->sk_peer_pid  = get_pid(task_tgid(current));
	sk->sk_peer_cred = get_current_cred();

static void copy_peercred(struct sock *sk, struct sock *peersk)
	put_pid(sk->sk_peer_pid);
	if (sk->sk_peer_cred)
		put_cred(sk->sk_peer_cred);
	sk->sk_peer_pid  = get_pid(peersk->sk_peer_pid);
	sk->sk_peer_cred = get_cred(peersk->sk_peer_cred);

static int unix_listen(struct socket *sock, int backlog)
	struct sock *sk = sock->sk;
	struct unix_sock *u = unix_sk(sk);
	struct pid *old_pid = NULL;

	if (sock->type != SOCK_STREAM && sock->type != SOCK_SEQPACKET)
		goto out;	/* Only stream/seqpacket sockets accept */
		goto out;	/* No listens on an unbound socket */
	if (sk->sk_state != TCP_CLOSE && sk->sk_state != TCP_LISTEN)
	if (backlog > sk->sk_max_ack_backlog)
		wake_up_interruptible_all(&u->peer_wait);
	sk->sk_max_ack_backlog	= backlog;
	sk->sk_state		= TCP_LISTEN;
	/* set credentials so connect can copy them */

	unix_state_unlock(sk);

static int unix_release(struct socket *);
static int unix_bind(struct socket *, struct sockaddr *, int);
static int unix_stream_connect(struct socket *, struct sockaddr *,
			       int addr_len, int flags);
static int unix_socketpair(struct socket *, struct socket *);
static int unix_accept(struct socket *, struct socket *, int);
static int unix_getname(struct socket *, struct sockaddr *, int *, int);
static unsigned int unix_poll(struct file *, struct socket *, poll_table *);
static unsigned int unix_dgram_poll(struct file *, struct socket *,
static int unix_ioctl(struct socket *, unsigned int, unsigned long);
static int unix_shutdown(struct socket *, int);
static int unix_stream_sendmsg(struct kiocb *, struct socket *,
			       struct msghdr *, size_t);
static int unix_stream_recvmsg(struct kiocb *, struct socket *,
			       struct msghdr *, size_t, int);
static int unix_dgram_sendmsg(struct kiocb *, struct socket *,
			      struct msghdr *, size_t);
static int unix_dgram_recvmsg(struct kiocb *, struct socket *,
			      struct msghdr *, size_t, int);
static int unix_dgram_connect(struct socket *, struct sockaddr *,
static int unix_seqpacket_sendmsg(struct kiocb *, struct socket *,
				  struct msghdr *, size_t);
static int unix_seqpacket_recvmsg(struct kiocb *, struct socket *,
				  struct msghdr *, size_t, int);

static int unix_set_peek_off(struct sock *sk, int val)
	struct unix_sock *u = unix_sk(sk);

	if (mutex_lock_interruptible(&u->readlock))

	sk->sk_peek_off = val;
	mutex_unlock(&u->readlock);
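
/*
 * Illustrative userspace sketch: SO_PEEK_OFF stores the value set by
 * unix_set_peek_off() above, making MSG_PEEK resume from the saved
 * offset instead of always peeking from the head of the queue. "fd" is
 * assumed to be a connected AF_UNIX socket:
 *
 *	#include <sys/socket.h>
 *
 *	static void peek_twice(int fd)
 *	{
 *		int off = 0;	/* enable peeking with offset, from byte 0 */
 *		char buf[64];
 *
 *		setsockopt(fd, SOL_SOCKET, SO_PEEK_OFF, &off, sizeof(off));
 *		recv(fd, buf, sizeof(buf), MSG_PEEK);	/* peeks bytes 0..63 */
 *		recv(fd, buf, sizeof(buf), MSG_PEEK);	/* peeks bytes 64..127 */
 *	}
 */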

static const struct proto_ops unix_stream_ops = {
	.owner =	THIS_MODULE,
	.release =	unix_release,
	.connect =	unix_stream_connect,
	.socketpair =	unix_socketpair,
	.accept =	unix_accept,
	.getname =	unix_getname,
	.listen =	unix_listen,
	.shutdown =	unix_shutdown,
	.setsockopt =	sock_no_setsockopt,
	.getsockopt =	sock_no_getsockopt,
	.sendmsg =	unix_stream_sendmsg,
	.recvmsg =	unix_stream_recvmsg,
	.mmap =		sock_no_mmap,
	.sendpage =	sock_no_sendpage,
	.set_peek_off =	unix_set_peek_off,

static const struct proto_ops unix_dgram_ops = {
	.owner =	THIS_MODULE,
	.release =	unix_release,
	.connect =	unix_dgram_connect,
	.socketpair =	unix_socketpair,
	.accept =	sock_no_accept,
	.getname =	unix_getname,
	.poll =		unix_dgram_poll,
	.listen =	sock_no_listen,
	.shutdown =	unix_shutdown,
	.setsockopt =	sock_no_setsockopt,
	.getsockopt =	sock_no_getsockopt,
	.sendmsg =	unix_dgram_sendmsg,
	.recvmsg =	unix_dgram_recvmsg,
	.mmap =		sock_no_mmap,
	.sendpage =	sock_no_sendpage,
	.set_peek_off =	unix_set_peek_off,

static const struct proto_ops unix_seqpacket_ops = {
	.owner =	THIS_MODULE,
	.release =	unix_release,
	.connect =	unix_stream_connect,
	.socketpair =	unix_socketpair,
	.accept =	unix_accept,
	.getname =	unix_getname,
	.poll =		unix_dgram_poll,
	.listen =	unix_listen,
	.shutdown =	unix_shutdown,
	.setsockopt =	sock_no_setsockopt,
	.getsockopt =	sock_no_getsockopt,
	.sendmsg =	unix_seqpacket_sendmsg,
	.recvmsg =	unix_seqpacket_recvmsg,
	.mmap =		sock_no_mmap,
	.sendpage =	sock_no_sendpage,
	.set_peek_off =	unix_set_peek_off,

static struct proto unix_proto = {
	.owner =	THIS_MODULE,
	.obj_size =	sizeof(struct unix_sock),

/*
 * AF_UNIX sockets do not interact with hardware, hence they
 * don't trigger interrupts - so it's safe for them to have
 * bh-unsafe locking for their sk_receive_queue.lock. Split off
 * this special lock-class by reinitializing the spinlock key:
 */
static struct lock_class_key af_unix_sk_receive_queue_lock_key;

static struct sock *unix_create1(struct net *net, struct socket *sock)
	struct sock *sk = NULL;

	atomic_long_inc(&unix_nr_socks);
	if (atomic_long_read(&unix_nr_socks) > 2 * get_max_files())

	sk = sk_alloc(net, PF_UNIX, GFP_KERNEL, &unix_proto);

	sock_init_data(sock, sk);
	lockdep_set_class(&sk->sk_receive_queue.lock,
			  &af_unix_sk_receive_queue_lock_key);

	sk->sk_write_space	= unix_write_space;
	sk->sk_max_ack_backlog	= net->unx.sysctl_max_dgram_qlen;
	sk->sk_destruct		= unix_sock_destructor;
	u->path.dentry = NULL;
	spin_lock_init(&u->lock);
	atomic_long_set(&u->inflight, 0);
	INIT_LIST_HEAD(&u->link);
	mutex_init(&u->readlock);	/* single task reading lock */
	init_waitqueue_head(&u->peer_wait);
	unix_insert_socket(unix_sockets_unbound(sk), sk);

	atomic_long_dec(&unix_nr_socks);

	sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);

static int unix_create(struct net *net, struct socket *sock, int protocol,
	if (protocol && protocol != PF_UNIX)
		return -EPROTONOSUPPORT;

	sock->state = SS_UNCONNECTED;

	switch (sock->type) {
		sock->ops = &unix_stream_ops;
		/*
		 *	Believe it or not BSD has AF_UNIX, SOCK_RAW though
		 *	nothing uses it.
		 */
		sock->type = SOCK_DGRAM;
		sock->ops = &unix_dgram_ops;
		sock->ops = &unix_seqpacket_ops;
		return -ESOCKTNOSUPPORT;

	return unix_create1(net, sock) ? 0 : -ENOMEM;

static int unix_release(struct socket *sock)
	struct sock *sk = sock->sk;

	unix_release_sock(sk, 0);

static int unix_autobind(struct socket *sock)
	struct sock *sk = sock->sk;
	struct net *net = sock_net(sk);
	struct unix_sock *u = unix_sk(sk);
	static u32 ordernum = 1;
	struct unix_address *addr;
	unsigned int retries = 0;

	err = mutex_lock_interruptible(&u->readlock);

	addr = kzalloc(sizeof(*addr) + sizeof(short) + 16, GFP_KERNEL);

	addr->name->sun_family = AF_UNIX;
	atomic_set(&addr->refcnt, 1);

	addr->len = sprintf(addr->name->sun_path+1, "%05x", ordernum) + 1 + sizeof(short);
	addr->hash = unix_hash_fold(csum_partial(addr->name, addr->len, 0));

	spin_lock(&unix_table_lock);
	ordernum = (ordernum+1)&0xFFFFF;

	if (__unix_find_socket_byname(net, addr->name, addr->len, sock->type,
		spin_unlock(&unix_table_lock);
		/*
		 * __unix_find_socket_byname() may take a long time if many
		 * names are already in use.
		 */
		/* Give up if all names seem to be in use. */
		if (retries++ == 0xFFFFF) {

	addr->hash ^= sk->sk_type;

	__unix_remove_socket(sk);
	__unix_insert_socket(&unix_socket_table[addr->hash], sk);
	spin_unlock(&unix_table_lock);

out:	mutex_unlock(&u->readlock);
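
/*
 * Illustrative userspace sketch: autobind is what a caller gets when
 * binding with an address carrying only the family, i.e. addr_len ==
 * sizeof(short) as tested in unix_bind() below. The kernel then picks a
 * free abstract name of five hex digits, which getsockname() reveals
 * ("fd" is an unbound AF_UNIX socket):
 *
 *	#include <string.h>
 *	#include <sys/socket.h>
 *	#include <sys/un.h>
 *
 *	struct sockaddr_un sun;
 *	socklen_t len = sizeof(sa_family_t);
 *
 *	memset(&sun, 0, sizeof(sun));
 *	sun.sun_family = AF_UNIX;
 *	bind(fd, (struct sockaddr *)&sun, len);
 *	len = sizeof(sun);
 *	getsockname(fd, (struct sockaddr *)&sun, &len);
 *	/* sun.sun_path[0] is '\0', followed by five hex digits */
 */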

static struct sock *unix_find_other(struct net *net,
				    struct sockaddr_un *sunname, int len,
				    int type, unsigned int hash, int *error)
	if (sunname->sun_path[0]) {
		err = kern_path(sunname->sun_path, LOOKUP_FOLLOW, &path);
		inode = path.dentry->d_inode;
		err = inode_permission(inode, MAY_WRITE);

		if (!S_ISSOCK(inode->i_mode))
		u = unix_find_socket_byinode(inode);

		if (u->sk_type == type)

		if (u->sk_type != type) {

		u = unix_find_socket_byname(net, sunname, len, type, hash);
			struct dentry *dentry;
			dentry = unix_sk(u)->path.dentry;
				touch_atime(&unix_sk(u)->path);

static int unix_mknod(const char *sun_path, umode_t mode, struct path *res)
	struct dentry *dentry;

	/*
	 * Get the parent directory, calculate the hash for the last
	 * component.
	 */
	dentry = kern_path_create(AT_FDCWD, sun_path, &path, 0);
	err = PTR_ERR(dentry);

	/*
	 * All right, let's create it.
	 */
	err = security_path_mknod(&path, dentry, mode, 0);
		err = vfs_mknod(path.dentry->d_inode, dentry, mode, 0);
			res->mnt = mntget(path.mnt);
			res->dentry = dget(dentry);
	done_path_create(&path, dentry);

static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
	struct sock *sk = sock->sk;
	struct net *net = sock_net(sk);
	struct unix_sock *u = unix_sk(sk);
	struct sockaddr_un *sunaddr = (struct sockaddr_un *)uaddr;
	char *sun_path = sunaddr->sun_path;
	struct unix_address *addr;
	struct hlist_head *list;

	if (sunaddr->sun_family != AF_UNIX)

	if (addr_len == sizeof(short)) {
		err = unix_autobind(sock);

	err = unix_mkname(sunaddr, addr_len, &hash);

	err = mutex_lock_interruptible(&u->readlock);

	addr = kmalloc(sizeof(*addr)+addr_len, GFP_KERNEL);

	memcpy(addr->name, sunaddr, addr_len);
	addr->len = addr_len;
	addr->hash = hash ^ sk->sk_type;
	atomic_set(&addr->refcnt, 1);

		umode_t mode = S_IFSOCK |
		       (SOCK_INODE(sock)->i_mode & ~current_umask());
		err = unix_mknod(sun_path, mode, &path);
			unix_release_addr(addr);
		addr->hash = UNIX_HASH_SIZE;
		hash = path.dentry->d_inode->i_ino & (UNIX_HASH_SIZE-1);
		spin_lock(&unix_table_lock);
		list = &unix_socket_table[hash];
		spin_lock(&unix_table_lock);
		if (__unix_find_socket_byname(net, sunaddr, addr_len,
					      sk->sk_type, hash)) {
			unix_release_addr(addr);
		list = &unix_socket_table[addr->hash];

	__unix_remove_socket(sk);
	__unix_insert_socket(list, sk);
	spin_unlock(&unix_table_lock);
	mutex_unlock(&u->readlock);

static void unix_state_double_lock(struct sock *sk1, struct sock *sk2)
	if (unlikely(sk1 == sk2) || !sk2) {
		unix_state_lock(sk1);
		unix_state_lock(sk1);
		unix_state_lock_nested(sk2);
		unix_state_lock(sk2);
		unix_state_lock_nested(sk1);

static void unix_state_double_unlock(struct sock *sk1, struct sock *sk2)
	if (unlikely(sk1 == sk2) || !sk2) {
		unix_state_unlock(sk1);
	unix_state_unlock(sk1);
	unix_state_unlock(sk2);

static int unix_dgram_connect(struct socket *sock, struct sockaddr *addr,
	struct sock *sk = sock->sk;
	struct net *net = sock_net(sk);
	struct sockaddr_un *sunaddr = (struct sockaddr_un *)addr;

	if (addr->sa_family != AF_UNSPEC) {
		err = unix_mkname(sunaddr, alen, &hash);

		if (test_bit(SOCK_PASSCRED, &sock->flags) &&
		    !unix_sk(sk)->addr && (err = unix_autobind(sock)) != 0)

		other = unix_find_other(net, sunaddr, alen, sock->type, hash, &err);

		unix_state_double_lock(sk, other);

		/* Apparently VFS overslept socket death. Retry. */
		if (sock_flag(other, SOCK_DEAD)) {
			unix_state_double_unlock(sk, other);

		if (!unix_may_send(sk, other))

		err = security_unix_may_send(sk->sk_socket, other->sk_socket);

		/*
		 *	1003.1g breaking connected state with AF_UNSPEC
		 */
		unix_state_double_lock(sk, other);

	/*
	 * If it was connected, reconnect.
	 */
	if (unix_peer(sk)) {
		struct sock *old_peer = unix_peer(sk);
		unix_peer(sk) = other;
		unix_state_double_unlock(sk, other);

		if (other != old_peer)
			unix_dgram_disconnected(sk, old_peer);
		unix_peer(sk) = other;
		unix_state_double_unlock(sk, other);

	unix_state_double_unlock(sk, other);

static long unix_wait_for_peer(struct sock *other, long timeo)
	struct unix_sock *u = unix_sk(other);

	prepare_to_wait_exclusive(&u->peer_wait, &wait, TASK_INTERRUPTIBLE);

	sched = !sock_flag(other, SOCK_DEAD) &&
		!(other->sk_shutdown & RCV_SHUTDOWN) &&
		unix_recvq_full(other);

	unix_state_unlock(other);

		timeo = schedule_timeout(timeo);

	finish_wait(&u->peer_wait, &wait);

static int unix_stream_connect(struct socket *sock, struct sockaddr *uaddr,
			       int addr_len, int flags)
	struct sockaddr_un *sunaddr = (struct sockaddr_un *)uaddr;
	struct sock *sk = sock->sk;
	struct net *net = sock_net(sk);
	struct unix_sock *u = unix_sk(sk), *newu, *otheru;
	struct sock *newsk = NULL;
	struct sock *other = NULL;
	struct sk_buff *skb = NULL;

	err = unix_mkname(sunaddr, addr_len, &hash);

	if (test_bit(SOCK_PASSCRED, &sock->flags) && !u->addr &&
	    (err = unix_autobind(sock)) != 0)

	timeo = sock_sndtimeo(sk, flags & O_NONBLOCK);

	/* First of all allocate resources.
	 * If we were to allocate after the state is locked, we would
	 * have to recheck everything again anyway.
	 */

	/* create new sock for complete connection */
	newsk = unix_create1(sock_net(sk), NULL);

	/* Allocate skb for sending to listening sock */
	skb = sock_wmalloc(newsk, 1, 0, GFP_KERNEL);

	/* Find listening sock. */
	other = unix_find_other(net, sunaddr, addr_len, sk->sk_type, hash, &err);

	/* Latch state of peer */
	unix_state_lock(other);

	/* Apparently VFS overslept socket death. Retry. */
	if (sock_flag(other, SOCK_DEAD)) {
		unix_state_unlock(other);

	err = -ECONNREFUSED;
	if (other->sk_state != TCP_LISTEN)
	if (other->sk_shutdown & RCV_SHUTDOWN)

	if (unix_recvq_full(other)) {
		timeo = unix_wait_for_peer(other, timeo);

		err = sock_intr_errno(timeo);
		if (signal_pending(current))

	/*  This is a tricky place. We need to grab our state lock and cannot
	    drop the lock on the peer. It is dangerous because a deadlock is
	    possible. The connect-to-self case and a simultaneous
	    attempt to connect are eliminated by checking the socket
	    state: other is TCP_LISTEN, and if sk is TCP_LISTEN we
	    check this before attempting to grab the lock.

	    Well, and we have to recheck the state after the socket is locked.
	 */

		/* This is ok... continue with connect */

	case TCP_ESTABLISHED:
		/* Socket is already connected */

	unix_state_lock_nested(sk);

	if (sk->sk_state != st) {
		unix_state_unlock(sk);
		unix_state_unlock(other);

	err = security_unix_stream_connect(sk, other, newsk);
		unix_state_unlock(sk);

	/* The way is open! Quickly set all the necessary fields... */

	unix_peer(newsk)	= sk;
	newsk->sk_state		= TCP_ESTABLISHED;
	newsk->sk_type		= sk->sk_type;
	init_peercred(newsk);
	newu = unix_sk(newsk);
	RCU_INIT_POINTER(newsk->sk_wq, &newu->peer_wq);
	otheru = unix_sk(other);

	/* copy address information from listening to new sock */
		atomic_inc(&otheru->addr->refcnt);
		newu->addr = otheru->addr;

	if (otheru->path.dentry) {
		path_get(&otheru->path);
		newu->path = otheru->path;

	/* Set credentials */
	copy_peercred(sk, other);

	sock->state	= SS_CONNECTED;
	sk->sk_state	= TCP_ESTABLISHED;

	smp_mb__after_atomic_inc();	/* sock_hold() does an atomic_inc() */
	unix_peer(sk) = newsk;

	unix_state_unlock(sk);

	/* take ten and send info to the listening sock */
	spin_lock(&other->sk_receive_queue.lock);
	__skb_queue_tail(&other->sk_receive_queue, skb);
	spin_unlock(&other->sk_receive_queue.lock);
	unix_state_unlock(other);
	other->sk_data_ready(other, 0);

	unix_state_unlock(other);

	unix_release_sock(newsk, 0);

static int unix_socketpair(struct socket *socka, struct socket *sockb)
	struct sock *ska = socka->sk, *skb = sockb->sk;

	/* Join our sockets back to back */
	unix_peer(ska) = skb;
	unix_peer(skb) = ska;

	if (ska->sk_type != SOCK_DGRAM) {
		ska->sk_state = TCP_ESTABLISHED;
		skb->sk_state = TCP_ESTABLISHED;
		socka->state  = SS_CONNECTED;
		sockb->state  = SS_CONNECTED;
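
/*
 * Illustrative userspace sketch: since the changelog above notes that
 * peercred is set for socketpair, either end of the pair can query the
 * creating task's credentials with SO_PEERCRED (struct ucred needs
 * _GNU_SOURCE on glibc):
 *
 *	#include <sys/socket.h>
 *
 *	int fds[2];
 *	struct ucred peer;
 *	socklen_t len = sizeof(peer);
 *
 *	socketpair(AF_UNIX, SOCK_STREAM, 0, fds);
 *	getsockopt(fds[0], SOL_SOCKET, SO_PEERCRED, &peer, &len);
 *	/* peer.pid, peer.uid, peer.gid describe the creating process */
 */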

static void unix_sock_inherit_flags(const struct socket *old,
	if (test_bit(SOCK_PASSCRED, &old->flags))
		set_bit(SOCK_PASSCRED, &new->flags);
	if (test_bit(SOCK_PASSSEC, &old->flags))
		set_bit(SOCK_PASSSEC, &new->flags);

static int unix_accept(struct socket *sock, struct socket *newsock, int flags)
	struct sock *sk = sock->sk;
	struct sk_buff *skb;

	if (sock->type != SOCK_STREAM && sock->type != SOCK_SEQPACKET)

	if (sk->sk_state != TCP_LISTEN)

	/* If socket state is TCP_LISTEN it cannot change (for now...),
	 * so that no locks are necessary.
	 */

	skb = skb_recv_datagram(sk, 0, flags&O_NONBLOCK, &err);
		/* This means receive shutdown. */

	skb_free_datagram(sk, skb);
	wake_up_interruptible(&unix_sk(sk)->peer_wait);

	/* attach accepted sock to socket */
	unix_state_lock(tsk);
	newsock->state = SS_CONNECTED;
	unix_sock_inherit_flags(sock, newsock);
	sock_graft(tsk, newsock);
	unix_state_unlock(tsk);

static int unix_getname(struct socket *sock, struct sockaddr *uaddr, int *uaddr_len, int peer)
	struct sock *sk = sock->sk;
	struct unix_sock *u;
	DECLARE_SOCKADDR(struct sockaddr_un *, sunaddr, uaddr);

		sk = unix_peer_get(sk);

	unix_state_lock(sk);
		sunaddr->sun_family = AF_UNIX;
		sunaddr->sun_path[0] = 0;
		*uaddr_len = sizeof(short);
		struct unix_address *addr = u->addr;

		*uaddr_len = addr->len;
		memcpy(sunaddr, addr->name, *uaddr_len);
	unix_state_unlock(sk);
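
/*
 * Illustrative userspace sketch: per the unbound branch above,
 * getsockname() on a never-bound socket reports only the address
 * family and no path bytes ("fd" is assumed to be such a socket):
 *
 *	struct sockaddr_un sun;
 *	socklen_t len = sizeof(sun);
 *
 *	getsockname(fd, (struct sockaddr *)&sun, &len);
 *	/* unbound: len == sizeof(short), i.e. just sun.sun_family */
 */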

static void unix_detach_fds(struct scm_cookie *scm, struct sk_buff *skb)
	scm->fp = UNIXCB(skb).fp;
	UNIXCB(skb).fp = NULL;

	for (i = scm->fp->count-1; i >= 0; i--)
		unix_notinflight(scm->fp->fp[i]);

static void unix_destruct_scm(struct sk_buff *skb)
	struct scm_cookie scm;

	memset(&scm, 0, sizeof(scm));
	scm.pid  = UNIXCB(skb).pid;
		unix_detach_fds(&scm, skb);

	/* Alas, it calls VFS */
	/* So fscking what? fput() has been SMP-safe since last summer */

#define MAX_RECURSION_LEVEL 4

static int unix_attach_fds(struct scm_cookie *scm, struct sk_buff *skb)
	unsigned char max_level = 0;
	int unix_sock_count = 0;

	for (i = scm->fp->count - 1; i >= 0; i--) {
		struct sock *sk = unix_get_socket(scm->fp->fp[i]);

			max_level = max(max_level,
					unix_sk(sk)->recursion_level);

	if (unlikely(max_level > MAX_RECURSION_LEVEL))
		return -ETOOMANYREFS;

	/*
	 * Need to duplicate file references for the sake of garbage
	 * collection. Otherwise a socket in the fps might become a
	 * candidate for GC while the skb is not yet queued.
	 */
	UNIXCB(skb).fp = scm_fp_dup(scm->fp);
	if (!UNIXCB(skb).fp)

	if (unix_sock_count) {
		for (i = scm->fp->count - 1; i >= 0; i--)
			unix_inflight(scm->fp->fp[i]);
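
/*
 * Illustrative userspace sketch of the fd passing accounted for above:
 * a sender attaches descriptors with an SCM_RIGHTS control message.
 * A minimal send of one descriptor ("fd_to_pass" is an invented name):
 *
 *	#include <string.h>
 *	#include <sys/socket.h>
 *	#include <sys/uio.h>
 *
 *	static int send_fd(int sock, int fd_to_pass)
 *	{
 *		char data = 'x';
 *		struct iovec iov = { .iov_base = &data, .iov_len = 1 };
 *		char ctl[CMSG_SPACE(sizeof(int))];
 *		struct msghdr msg = {
 *			.msg_iov = &iov, .msg_iovlen = 1,
 *			.msg_control = ctl, .msg_controllen = sizeof(ctl),
 *		};
 *		struct cmsghdr *cmsg;
 *
 *		memset(ctl, 0, sizeof(ctl));
 *		cmsg = CMSG_FIRSTHDR(&msg);
 *		cmsg->cmsg_level = SOL_SOCKET;
 *		cmsg->cmsg_type = SCM_RIGHTS;
 *		cmsg->cmsg_len = CMSG_LEN(sizeof(int));
 *		memcpy(CMSG_DATA(cmsg), &fd_to_pass, sizeof(int));
 *		return sendmsg(sock, &msg, 0) == 1 ? 0 : -1;
 *	}
 */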

static int unix_scm_to_skb(struct scm_cookie *scm, struct sk_buff *skb, bool send_fds)
	UNIXCB(skb).pid = get_pid(scm->pid);
	UNIXCB(skb).uid = scm->creds.uid;
	UNIXCB(skb).gid = scm->creds.gid;
	UNIXCB(skb).fp = NULL;
	if (scm->fp && send_fds)
		err = unix_attach_fds(scm, skb);

	skb->destructor = unix_destruct_scm;

/*
 * Some apps rely on write() giving SCM_CREDENTIALS.
 * We include credentials if the source or destination socket
 * asserted SOCK_PASSCRED.
 */
static void maybe_add_creds(struct sk_buff *skb, const struct socket *sock,
			    const struct sock *other)
	if (UNIXCB(skb).pid)
	if (test_bit(SOCK_PASSCRED, &sock->flags) ||
	    !other->sk_socket ||
	    test_bit(SOCK_PASSCRED, &other->sk_socket->flags)) {
		UNIXCB(skb).pid = get_pid(task_tgid(current));
		current_uid_gid(&UNIXCB(skb).uid, &UNIXCB(skb).gid);
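
/*
 * Illustrative userspace sketch: a receiver opts in with SO_PASSCRED,
 * after which each message carries the SCM_CREDENTIALS data added
 * above (struct ucred and SCM_CREDENTIALS need _GNU_SOURCE on glibc):
 *
 *	#include <string.h>
 *	#include <sys/socket.h>
 *	#include <sys/uio.h>
 *
 *	static void read_creds(int fd)
 *	{
 *		int on = 1;
 *		char buf[64], ctl[CMSG_SPACE(sizeof(struct ucred))];
 *		struct iovec iov = { .iov_base = buf, .iov_len = sizeof(buf) };
 *		struct msghdr msg = {
 *			.msg_iov = &iov, .msg_iovlen = 1,
 *			.msg_control = ctl, .msg_controllen = sizeof(ctl),
 *		};
 *		struct cmsghdr *cmsg;
 *
 *		setsockopt(fd, SOL_SOCKET, SO_PASSCRED, &on, sizeof(on));
 *		if (recvmsg(fd, &msg, 0) < 0)
 *			return;
 *		for (cmsg = CMSG_FIRSTHDR(&msg); cmsg;
 *		     cmsg = CMSG_NXTHDR(&msg, cmsg))
 *			if (cmsg->cmsg_level == SOL_SOCKET &&
 *			    cmsg->cmsg_type == SCM_CREDENTIALS) {
 *				struct ucred cred;
 *
 *				memcpy(&cred, CMSG_DATA(cmsg), sizeof(cred));
 *				/* cred.pid, cred.uid, cred.gid of sender */
 *			}
 *	}
 */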

/*
 *	Send AF_UNIX data.
 */

static int unix_dgram_sendmsg(struct kiocb *kiocb, struct socket *sock,
			      struct msghdr *msg, size_t len)
	struct sock_iocb *siocb = kiocb_to_siocb(kiocb);
	struct sock *sk = sock->sk;
	struct net *net = sock_net(sk);
	struct unix_sock *u = unix_sk(sk);
	struct sockaddr_un *sunaddr = msg->msg_name;
	struct sock *other = NULL;
	int namelen = 0; /* fake GCC */
	struct sk_buff *skb;
	struct scm_cookie tmp_scm;

	if (NULL == siocb->scm)
		siocb->scm = &tmp_scm;
	err = scm_send(sock, msg, siocb->scm, false);

	if (msg->msg_flags&MSG_OOB)

	if (msg->msg_namelen) {
		err = unix_mkname(sunaddr, msg->msg_namelen, &hash);
		other = unix_peer_get(sk);

	if (test_bit(SOCK_PASSCRED, &sock->flags) && !u->addr
	    && (err = unix_autobind(sock)) != 0)

	if (len > sk->sk_sndbuf - 32)

	if (len > SKB_MAX_ALLOC)
		data_len = min_t(size_t,
				 len - SKB_MAX_ALLOC,
				 MAX_SKB_FRAGS * PAGE_SIZE);

	skb = sock_alloc_send_pskb(sk, len - data_len, data_len,
				   msg->msg_flags & MSG_DONTWAIT, &err);

	err = unix_scm_to_skb(siocb->scm, skb, true);
	max_level = err + 1;
	unix_get_secdata(siocb->scm, skb);

	skb_put(skb, len - data_len);
	skb->data_len = data_len;
	err = skb_copy_datagram_from_iovec(skb, 0, msg->msg_iov, 0, len);

	timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);

	if (sunaddr == NULL)

	other = unix_find_other(net, sunaddr, namelen, sk->sk_type,

	if (sk_filter(other, skb) < 0) {
		/* Toss the packet but do not return any error to the sender */

	unix_state_lock(other);

	if (!unix_may_send(sk, other))

	if (sock_flag(other, SOCK_DEAD)) {
		/*
		 *	Check with 1003.1g - what should ...
		 */
		unix_state_unlock(other);

		unix_state_lock(sk);
		if (unix_peer(sk) == other) {
			unix_peer(sk) = NULL;
			unix_state_unlock(sk);

			unix_dgram_disconnected(sk, other);
			err = -ECONNREFUSED;
			unix_state_unlock(sk);

	if (other->sk_shutdown & RCV_SHUTDOWN)

	if (sk->sk_type != SOCK_SEQPACKET) {
		err = security_unix_may_send(sk->sk_socket, other->sk_socket);

	if (unix_peer(other) != sk && unix_recvq_full(other)) {
		timeo = unix_wait_for_peer(other, timeo);

		err = sock_intr_errno(timeo);
		if (signal_pending(current))

	if (sock_flag(other, SOCK_RCVTSTAMP))
		__net_timestamp(skb);
	maybe_add_creds(skb, sock, other);
	skb_queue_tail(&other->sk_receive_queue, skb);
	if (max_level > unix_sk(other)->recursion_level)
		unix_sk(other)->recursion_level = max_level;
	unix_state_unlock(other);
	other->sk_data_ready(other, len);
	scm_destroy(siocb->scm);

	unix_state_unlock(other);

	scm_destroy(siocb->scm);

static int unix_stream_sendmsg(struct kiocb *kiocb, struct socket *sock,
			       struct msghdr *msg, size_t len)
	struct sock_iocb *siocb = kiocb_to_siocb(kiocb);
	struct sock *sk = sock->sk;
	struct sock *other = NULL;
	struct sk_buff *skb;
	struct scm_cookie tmp_scm;
	bool fds_sent = false;

	if (NULL == siocb->scm)
		siocb->scm = &tmp_scm;
	err = scm_send(sock, msg, siocb->scm, false);

	if (msg->msg_flags&MSG_OOB)

	if (msg->msg_namelen) {
		err = sk->sk_state == TCP_ESTABLISHED ? -EISCONN : -EOPNOTSUPP;

	other = unix_peer(sk);

	if (sk->sk_shutdown & SEND_SHUTDOWN)

	while (sent < len) {
		/*
		 * Optimisation for the fact that under 0.01% of X
		 * messages typically need breaking up.
		 */

		/* Keep two messages in the pipe so it schedules better */
		if (size > ((sk->sk_sndbuf >> 1) - 64))
			size = (sk->sk_sndbuf >> 1) - 64;

		if (size > SKB_MAX_ALLOC)
			size = SKB_MAX_ALLOC;

		skb = sock_alloc_send_skb(sk, size, msg->msg_flags&MSG_DONTWAIT,

		/*
		 * If you pass two values to sock_alloc_send_skb
		 * it tries to grab the large buffer with GFP_NOFS
		 * (which can fail easily), and if it fails it grabs the
		 * fallback size buffer which is under a page and will
		 * succeed. [Alan]
		 */
		size = min_t(int, size, skb_tailroom(skb));

		/* Only send the fds in the first buffer */
		err = unix_scm_to_skb(siocb->scm, skb, !fds_sent);
		max_level = err + 1;

		err = memcpy_fromiovec(skb_put(skb, size), msg->msg_iov, size);

		unix_state_lock(other);

		if (sock_flag(other, SOCK_DEAD) ||
		    (other->sk_shutdown & RCV_SHUTDOWN))

		maybe_add_creds(skb, sock, other);
		skb_queue_tail(&other->sk_receive_queue, skb);
		if (max_level > unix_sk(other)->recursion_level)
			unix_sk(other)->recursion_level = max_level;
		unix_state_unlock(other);
		other->sk_data_ready(other, size);

	scm_destroy(siocb->scm);

	unix_state_unlock(other);

	if (sent == 0 && !(msg->msg_flags&MSG_NOSIGNAL))
		send_sig(SIGPIPE, current, 0);

	scm_destroy(siocb->scm);
	return sent ? : err;

static int unix_seqpacket_sendmsg(struct kiocb *kiocb, struct socket *sock,
				  struct msghdr *msg, size_t len)
	struct sock *sk = sock->sk;

	err = sock_error(sk);

	if (sk->sk_state != TCP_ESTABLISHED)

	if (msg->msg_namelen)
		msg->msg_namelen = 0;

	return unix_dgram_sendmsg(kiocb, sock, msg, len);

static int unix_seqpacket_recvmsg(struct kiocb *iocb, struct socket *sock,
				  struct msghdr *msg, size_t size,
	struct sock *sk = sock->sk;

	if (sk->sk_state != TCP_ESTABLISHED)

	return unix_dgram_recvmsg(iocb, sock, msg, size, flags);
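
/*
 * Illustrative userspace sketch: SOCK_SEQPACKET is connection-oriented
 * like SOCK_STREAM (hence the TCP_ESTABLISHED checks above) but keeps
 * record boundaries like SOCK_DGRAM:
 *
 *	int fds[2];
 *	char buf[16];
 *
 *	socketpair(AF_UNIX, SOCK_SEQPACKET, 0, fds);
 *	send(fds[0], "ab", 2, 0);
 *	send(fds[0], "cd", 2, 0);
 *	recv(fds[1], buf, sizeof(buf), 0);	/* returns 2 ("ab"), not 4 */
 */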

static void unix_copy_addr(struct msghdr *msg, struct sock *sk)
	struct unix_sock *u = unix_sk(sk);

		msg->msg_namelen = u->addr->len;
		memcpy(msg->msg_name, u->addr->name, u->addr->len);

static int unix_dgram_recvmsg(struct kiocb *iocb, struct socket *sock,
			      struct msghdr *msg, size_t size,
	struct sock_iocb *siocb = kiocb_to_siocb(iocb);
	struct scm_cookie tmp_scm;
	struct sock *sk = sock->sk;
	struct unix_sock *u = unix_sk(sk);
	int noblock = flags & MSG_DONTWAIT;
	struct sk_buff *skb;

	err = mutex_lock_interruptible(&u->readlock);
		err = sock_intr_errno(sock_rcvtimeo(sk, noblock));

	skip = sk_peek_offset(sk, flags);

	skb = __skb_recv_datagram(sk, flags, &peeked, &skip, &err);
		unix_state_lock(sk);
		/* Signal EOF on disconnected non-blocking SEQPACKET socket. */
		if (sk->sk_type == SOCK_SEQPACKET && err == -EAGAIN &&
		    (sk->sk_shutdown & RCV_SHUTDOWN))
		unix_state_unlock(sk);

	wake_up_interruptible_sync_poll(&u->peer_wait,
					POLLOUT | POLLWRNORM | POLLWRBAND);

		unix_copy_addr(msg, skb->sk);

	if (size > skb->len - skip)
		size = skb->len - skip;
	else if (size < skb->len - skip)
		msg->msg_flags |= MSG_TRUNC;

	err = skb_copy_datagram_iovec(skb, skip, msg->msg_iov, size);

	if (sock_flag(sk, SOCK_RCVTSTAMP))
		__sock_recv_timestamp(msg, sk, skb);

		siocb->scm = &tmp_scm;
		memset(&tmp_scm, 0, sizeof(tmp_scm));

	scm_set_cred(siocb->scm, UNIXCB(skb).pid, UNIXCB(skb).uid, UNIXCB(skb).gid);
	unix_set_secdata(siocb->scm, skb);

	if (!(flags & MSG_PEEK)) {
			unix_detach_fds(siocb->scm, skb);

		sk_peek_offset_bwd(sk, skb->len);

		/* It is questionable: on PEEK we could:
		   - do not return fds - good, but too simple 8)
		   - return fds, and do not return them on read (old strategy,
		     apparently wrong)
		   - clone fds (I chose it for now, it is the most universal
		     solution)

		   POSIX 1003.1g does not actually define this clearly
		   at all. POSIX 1003.1g doesn't define a lot of things
		   at all. :-)
		 */

		sk_peek_offset_fwd(sk, size);

			siocb->scm->fp = scm_fp_dup(UNIXCB(skb).fp);

	err = (flags & MSG_TRUNC) ? skb->len - skip : size;

	scm_recv(sock, msg, siocb->scm, flags);

	skb_free_datagram(sk, skb);

	mutex_unlock(&u->readlock);

/*
 *	Sleep until more data has arrived. But check for races.
 */
static long unix_stream_data_wait(struct sock *sk, long timeo,
				  struct sk_buff *last)
	unix_state_lock(sk);

	prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);

	if (skb_peek_tail(&sk->sk_receive_queue) != last ||
	    (sk->sk_shutdown & RCV_SHUTDOWN) ||
	    signal_pending(current) ||

	set_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
	unix_state_unlock(sk);
	timeo = schedule_timeout(timeo);
	unix_state_lock(sk);
	clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);

	finish_wait(sk_sleep(sk), &wait);
	unix_state_unlock(sk);

static int unix_stream_recvmsg(struct kiocb *iocb, struct socket *sock,
			       struct msghdr *msg, size_t size,
	struct sock_iocb *siocb = kiocb_to_siocb(iocb);
	struct scm_cookie tmp_scm;
	struct sock *sk = sock->sk;
	struct unix_sock *u = unix_sk(sk);
	struct sockaddr_un *sunaddr = msg->msg_name;
	int check_creds = 0;

	if (sk->sk_state != TCP_ESTABLISHED)

	target = sock_rcvlowat(sk, flags&MSG_WAITALL, size);
	timeo = sock_rcvtimeo(sk, flags&MSG_DONTWAIT);

	/* Lock the socket to prevent queue disordering
	 * while we sleep in memcpy_tomsg
	 */
	siocb->scm = &tmp_scm;
	memset(&tmp_scm, 0, sizeof(tmp_scm));

	err = mutex_lock_interruptible(&u->readlock);
		err = sock_intr_errno(timeo);

		struct sk_buff *skb, *last;

		unix_state_lock(sk);
		last = skb = skb_peek(&sk->sk_receive_queue);
			unix_sk(sk)->recursion_level = 0;
			if (copied >= target)

			/*
			 *	POSIX 1003.1g mandates this order.
			 */
			err = sock_error(sk);
			if (sk->sk_shutdown & RCV_SHUTDOWN)
			unix_state_unlock(sk);

			mutex_unlock(&u->readlock);

			timeo = unix_stream_data_wait(sk, timeo, last);

			if (signal_pending(current)
			    ||  mutex_lock_interruptible(&u->readlock)) {
				err = sock_intr_errno(timeo);

		unix_state_unlock(sk);

		skip = sk_peek_offset(sk, flags);
		while (skip >= skb->len) {
			skb = skb_peek_next(skb, &sk->sk_receive_queue);

		unix_state_unlock(sk);

			/* Never glue messages from different writers */
			if ((UNIXCB(skb).pid  != siocb->scm->pid) ||
			    !uid_eq(UNIXCB(skb).uid, siocb->scm->creds.uid) ||
			    !gid_eq(UNIXCB(skb).gid, siocb->scm->creds.gid))
		} else if (test_bit(SOCK_PASSCRED, &sock->flags)) {
			/* Copy credentials */
			scm_set_cred(siocb->scm, UNIXCB(skb).pid, UNIXCB(skb).uid, UNIXCB(skb).gid);

		/* Copy address just once */
			unix_copy_addr(msg, skb->sk);

		chunk = min_t(unsigned int, skb->len - skip, size);
		if (memcpy_toiovec(msg->msg_iov, skb->data + skip, chunk)) {

		/* Mark read part of skb as used */
		if (!(flags & MSG_PEEK)) {
			skb_pull(skb, chunk);

			sk_peek_offset_bwd(sk, chunk);

				unix_detach_fds(siocb->scm, skb);

			skb_unlink(skb, &sk->sk_receive_queue);

			/* It is questionable, see note in unix_dgram_recvmsg.
			 */
				siocb->scm->fp = scm_fp_dup(UNIXCB(skb).fp);

			sk_peek_offset_fwd(sk, chunk);

	mutex_unlock(&u->readlock);
	scm_recv(sock, msg, siocb->scm, flags);

	return copied ? : err;

static int unix_shutdown(struct socket *sock, int mode)
	struct sock *sk = sock->sk;

	if (mode < SHUT_RD || mode > SHUT_RDWR)
	/* SHUT_RD   (0) -> RCV_SHUTDOWN  (1)
	 * SHUT_WR   (1) -> SEND_SHUTDOWN (2)
	 * SHUT_RDWR (2) -> SHUTDOWN_MASK (3)
	 */

	unix_state_lock(sk);
	sk->sk_shutdown |= mode;
	other = unix_peer(sk);
	unix_state_unlock(sk);
	sk->sk_state_change(sk);

	    (sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET)) {

		if (mode&RCV_SHUTDOWN)
			peer_mode |= SEND_SHUTDOWN;
		if (mode&SEND_SHUTDOWN)
			peer_mode |= RCV_SHUTDOWN;
		unix_state_lock(other);
		other->sk_shutdown |= peer_mode;
		unix_state_unlock(other);
		other->sk_state_change(other);
		if (peer_mode == SHUTDOWN_MASK)
			sk_wake_async(other, SOCK_WAKE_WAITD, POLL_HUP);
		else if (peer_mode & RCV_SHUTDOWN)
			sk_wake_async(other, SOCK_WAKE_WAITD, POLL_IN);
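
/*
 * Illustrative userspace sketch: after shutdown(fd, SHUT_WR) on one end
 * of a stream pair, the peer-signalling above makes the other end see
 * end-of-file, while this end can still receive ("fds", "buf" and "n"
 * are assumed from surrounding code):
 *
 *	shutdown(fds[0], SHUT_WR);
 *	n = read(fds[1], buf, sizeof(buf));	/* n == 0: EOF */
 */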

long unix_inq_len(struct sock *sk)
	struct sk_buff *skb;

	if (sk->sk_state == TCP_LISTEN)

	spin_lock(&sk->sk_receive_queue.lock);
	if (sk->sk_type == SOCK_STREAM ||
	    sk->sk_type == SOCK_SEQPACKET) {
		skb_queue_walk(&sk->sk_receive_queue, skb)
		skb = skb_peek(&sk->sk_receive_queue);
	spin_unlock(&sk->sk_receive_queue.lock);

EXPORT_SYMBOL_GPL(unix_inq_len);

long unix_outq_len(struct sock *sk)
	return sk_wmem_alloc_get(sk);
EXPORT_SYMBOL_GPL(unix_outq_len);

static int unix_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
	struct sock *sk = sock->sk;

		amount = unix_outq_len(sk);
		err = put_user(amount, (int __user *)arg);
		amount = unix_inq_len(sk);
		err = put_user(amount, (int __user *)arg);
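
/*
 * Illustrative userspace sketch: the queue lengths computed by
 * unix_outq_len()/unix_inq_len() above are exposed through the
 * SIOCOUTQ/SIOCINQ ioctls ("fd" is an AF_UNIX socket):
 *
 *	#include <sys/ioctl.h>
 *	#include <linux/sockios.h>
 *
 *	int inq, outq;
 *
 *	ioctl(fd, SIOCINQ, &inq);	/* bytes in the receive queue */
 *	ioctl(fd, SIOCOUTQ, &outq);	/* bytes not yet consumed by peer */
 */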

static unsigned int unix_poll(struct file *file, struct socket *sock, poll_table *wait)
	struct sock *sk = sock->sk;

	sock_poll_wait(file, sk_sleep(sk), wait);

	/* exceptional events? */
	if (sk->sk_shutdown == SHUTDOWN_MASK)
	if (sk->sk_shutdown & RCV_SHUTDOWN)
		mask |= POLLRDHUP | POLLIN | POLLRDNORM;

	if (!skb_queue_empty(&sk->sk_receive_queue))
		mask |= POLLIN | POLLRDNORM;

	/* Connection-based sockets need to check for termination and startup */
	if ((sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET) &&
	    sk->sk_state == TCP_CLOSE)

	/*
	 * We set writable also when the other side has shut down the
	 * connection. This prevents stuck sockets.
	 */
	if (unix_writable(sk))
		mask |= POLLOUT | POLLWRNORM | POLLWRBAND;

static unsigned int unix_dgram_poll(struct file *file, struct socket *sock,
	struct sock *sk = sock->sk, *other;
	unsigned int mask, writable;

	sock_poll_wait(file, sk_sleep(sk), wait);

	/* exceptional events? */
	if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue))
			(sock_flag(sk, SOCK_SELECT_ERR_QUEUE) ? POLLPRI : 0);

	if (sk->sk_shutdown & RCV_SHUTDOWN)
		mask |= POLLRDHUP | POLLIN | POLLRDNORM;
	if (sk->sk_shutdown == SHUTDOWN_MASK)

	if (!skb_queue_empty(&sk->sk_receive_queue))
		mask |= POLLIN | POLLRDNORM;

	/* Connection-based sockets need to check for termination and startup */
	if (sk->sk_type == SOCK_SEQPACKET) {
		if (sk->sk_state == TCP_CLOSE)
		/* connection hasn't started yet? */
		if (sk->sk_state == TCP_SYN_SENT)

	/* No write status requested, avoid expensive OUT tests. */
	if (!(poll_requested_events(wait) & (POLLWRBAND|POLLWRNORM|POLLOUT)))

	writable = unix_writable(sk);
	other = unix_peer_get(sk);
		if (unix_peer(other) != sk) {
			sock_poll_wait(file, &unix_sk(other)->peer_wait, wait);
			if (unix_recvq_full(other))

		mask |= POLLOUT | POLLWRNORM | POLLWRBAND;
		set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);

#ifdef CONFIG_PROC_FS

#define BUCKET_SPACE (BITS_PER_LONG - (UNIX_HASH_BITS + 1) - 1)

#define get_bucket(x) ((x) >> BUCKET_SPACE)
#define get_offset(x) ((x) & ((1L << BUCKET_SPACE) - 1))
#define set_bucket_offset(b, o) ((b) << BUCKET_SPACE | (o))

static struct sock *unix_from_bucket(struct seq_file *seq, loff_t *pos)
	unsigned long offset = get_offset(*pos);
	unsigned long bucket = get_bucket(*pos);
	unsigned long count = 0;

	for (sk = sk_head(&unix_socket_table[bucket]); sk; sk = sk_next(sk)) {
		if (sock_net(sk) != seq_file_net(seq))
		if (++count == offset)

static struct sock *unix_next_socket(struct seq_file *seq,
	unsigned long bucket;

	while (sk > (struct sock *)SEQ_START_TOKEN) {
		if (sock_net(sk) == seq_file_net(seq))

		sk = unix_from_bucket(seq, pos);

		bucket = get_bucket(*pos) + 1;
		*pos = set_bucket_offset(bucket, 1);
	} while (bucket < ARRAY_SIZE(unix_socket_table));

static void *unix_seq_start(struct seq_file *seq, loff_t *pos)
	__acquires(unix_table_lock)
	spin_lock(&unix_table_lock);
		return SEQ_START_TOKEN;
	if (get_bucket(*pos) >= ARRAY_SIZE(unix_socket_table))
	return unix_next_socket(seq, NULL, pos);

static void *unix_seq_next(struct seq_file *seq, void *v, loff_t *pos)
	return unix_next_socket(seq, v, pos);

static void unix_seq_stop(struct seq_file *seq, void *v)
	__releases(unix_table_lock)
	spin_unlock(&unix_table_lock);

static int unix_seq_show(struct seq_file *seq, void *v)
	if (v == SEQ_START_TOKEN)
		seq_puts(seq, "Num RefCount Protocol Flags Type St "
		struct unix_sock *u = unix_sk(s);

		seq_printf(seq, "%pK: %08X %08X %08X %04X %02X %5lu",
			atomic_read(&s->sk_refcnt),
			s->sk_state == TCP_LISTEN ? __SO_ACCEPTCON : 0,
			(s->sk_state == TCP_ESTABLISHED ? SS_CONNECTED : SS_UNCONNECTED) :
			(s->sk_state == TCP_ESTABLISHED ? SS_CONNECTING : SS_DISCONNECTING),

			len = u->addr->len - sizeof(short);
			if (!UNIX_ABSTRACT(s))

			for ( ; i < len; i++)
				seq_putc(seq, u->addr->name->sun_path[i]);
		unix_state_unlock(s);
		seq_putc(seq, '\n');

static const struct seq_operations unix_seq_ops = {
	.start  = unix_seq_start,
	.next   = unix_seq_next,
	.stop   = unix_seq_stop,
	.show   = unix_seq_show,

static int unix_seq_open(struct inode *inode, struct file *file)
	return seq_open_net(inode, file, &unix_seq_ops,
			    sizeof(struct seq_net_private));

static const struct file_operations unix_seq_fops = {
	.owner		= THIS_MODULE,
	.open		= unix_seq_open,
	.llseek		= seq_lseek,
	.release	= seq_release_net,

static const struct net_proto_family unix_family_ops = {
	.create = unix_create,
	.owner	= THIS_MODULE,

static int __net_init unix_net_init(struct net *net)
	int error = -ENOMEM;

	net->unx.sysctl_max_dgram_qlen = 10;
	if (unix_sysctl_register(net))

#ifdef CONFIG_PROC_FS
	if (!proc_create("unix", 0, net->proc_net, &unix_seq_fops)) {
		unix_sysctl_unregister(net);

static void __net_exit unix_net_exit(struct net *net)
	unix_sysctl_unregister(net);
	remove_proc_entry("unix", net->proc_net);

static struct pernet_operations unix_net_ops = {
	.init = unix_net_init,
	.exit = unix_net_exit,

static int __init af_unix_init(void)
	BUILD_BUG_ON(sizeof(struct unix_skb_parms) > FIELD_SIZEOF(struct sk_buff, cb));

	rc = proto_register(&unix_proto, 1);
		printk(KERN_CRIT "%s: Cannot create unix_sock SLAB cache!\n",

	sock_register(&unix_family_ops);
	register_pernet_subsys(&unix_net_ops);

static void __exit af_unix_exit(void)
	sock_unregister(PF_UNIX);
	proto_unregister(&unix_proto);
	unregister_pernet_subsys(&unix_net_ops);

/* Earlier than device_initcall() so that other drivers invoking
   request_module() don't end up in a loop when modprobe tries
   to use a UNIX socket. But later than subsys_initcall() because
   we depend on stuff initialised there. */
fs_initcall(af_unix_init);
module_exit(af_unix_exit);

MODULE_LICENSE("GPL");
MODULE_ALIAS_NETPROTO(PF_UNIX);