/*
 * NET4:	Implementation of BSD Unix domain sockets.
 *
 * Authors:	Alan Cox, <alan@lxorguk.ukuu.org.uk>
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 * Fixes:
 *		Linus Torvalds	:	Assorted bug cures.
 *		Niibe Yutaka	:	async I/O support.
 *		Carsten Paeth	:	PF_UNIX check, address fixes.
 *		Alan Cox	:	Limit size of allocated blocks.
 *		Alan Cox	:	Fixed the stupid socketpair bug.
 *		Alan Cox	:	BSD compatibility fine tuning.
 *		Alan Cox	:	Fixed a bug in connect when interrupted.
 *		Alan Cox	:	Sorted out a proper draft version of
 *					file descriptor passing hacked up from
 *					2.0.29.
 *		Marty Leisner	:	Fixes to fd passing.
 *		Nick Nevin	:	recvmsg bugfix.
 *		Alan Cox	:	Started proper garbage collector.
 *		Heiko Eißfeldt	:	Missing verify_area check.
 *		Alan Cox	:	Started POSIXisms.
 *		Andreas Schwab	:	Replace inode by dentry for proper
 *					reference counting.
 *		Kirk Petersen	:	Made this a module.
 *		Christoph Rohland :	Elegant non-blocking accept/connect
 *					algorithm.
 *		Alexey Kuznetsov :	Repaired (I hope) bugs introduced
 *					by the above two patches.
 *		Andrea Arcangeli :	If possible, we block in connect(2)
 *					when the max backlog of the listen
 *					socket has been reached. This won't
 *					break old apps and it avoids huge
 *					numbers of hashed socks (for unix_gc()
 *					performance reasons).
 *					Security fix that limits the max
 *					number of socks to 2*max_files and
 *					the number of skbs queueable in the
 *					dgram receiver.
 *		Artur Skawina	:	Hash function optimizations.
 *		Alexey Kuznetsov :	Full scale SMP. Lots of bugs are introduced 8)
 *		Malcolm Beattie	:	Set peercred for socketpair.
 *		Michal Ostrowski :	Module initialization cleanup.
 *		Arnaldo C. Melo	:	Remove MOD_{INC,DEC}_USE_COUNT;
 *					the core infrastructure is doing that
 *					for all net proto families now (2.5.69+).
 *
 * Known differences from the reference BSD that was tested:
 *
 *	ECONNREFUSED is not returned from one end of a connected() socket to the
 *		other the moment one end closes.
 *	fstat() doesn't return st_dev=0, and gives the blksize as the high water
 *		mark and a fake inode identifier (nor the BSD first-socket-fstat-twice
 *		bug).
 *	accept() returns a path name even if the connecting socket has closed
 *		in the meantime (BSD loses the path and gives up).
 *	accept() returns a 0 length path for an unbound connector. BSD returns 16
 *		and a null first byte in the path (but not for gethost/peername -
 *		BSD bug ??).
 *	socketpair(...SOCK_RAW..) doesn't panic the kernel.
 *	BSD af_unix apparently has connect forgetting to block properly.
 *		(need to check this with the POSIX spec in detail)
 *
 * Differences from 2.0.0-11-... (ANK)
 *	Bug fixes and improvements.
 *		- client shutdown killed server socket.
 *		- removed all useless cli/sti pairs.
 *
 *	Semantic changes/extensions.
 *		- generic control message passing.
 *		- SCM_CREDENTIALS control message.
 *		- "Abstract" (not FS-based) socket bindings.
 *		  Abstract names are sequences of bytes (not zero terminated)
 *		  that start with a 0 byte, so that this name space does not
 *		  intersect with BSD names.
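 *
 *		  Illustrative note (a hedged userspace sketch, not code from
 *		  this file): binding an abstract name from an application.
 *		  The leading NUL byte selects the abstract namespace, and the
 *		  address length passed to bind() counts only the bytes used:
 *
 *			struct sockaddr_un sun = { .sun_family = AF_UNIX };
 *			int fd = socket(AF_UNIX, SOCK_STREAM, 0);
 *
 *			memcpy(sun.sun_path + 1, "demo", 4);  // name "\0demo"
 *			bind(fd, (struct sockaddr *)&sun,
 *			     offsetof(struct sockaddr_un, sun_path) + 1 + 4);
 */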
83 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
85 #include <linux/module.h>
86 #include <linux/kernel.h>
87 #include <linux/signal.h>
88 #include <linux/sched.h>
89 #include <linux/errno.h>
90 #include <linux/string.h>
91 #include <linux/stat.h>
92 #include <linux/dcache.h>
93 #include <linux/namei.h>
94 #include <linux/socket.h>
96 #include <linux/fcntl.h>
97 #include <linux/termios.h>
98 #include <linux/sockios.h>
99 #include <linux/net.h>
100 #include <linux/in.h>
101 #include <linux/fs.h>
102 #include <linux/slab.h>
103 #include <asm/uaccess.h>
104 #include <linux/skbuff.h>
105 #include <linux/netdevice.h>
106 #include <net/net_namespace.h>
107 #include <net/sock.h>
108 #include <net/tcp_states.h>
109 #include <net/af_unix.h>
110 #include <linux/proc_fs.h>
111 #include <linux/seq_file.h>
113 #include <linux/init.h>
114 #include <linux/poll.h>
115 #include <linux/rtnetlink.h>
116 #include <linux/mount.h>
117 #include <net/checksum.h>
118 #include <linux/security.h>
119 #include <linux/freezer.h>
121 struct hlist_head unix_socket_table[2 * UNIX_HASH_SIZE];
122 EXPORT_SYMBOL_GPL(unix_socket_table);
123 DEFINE_SPINLOCK(unix_table_lock);
124 EXPORT_SYMBOL_GPL(unix_table_lock);
125 static atomic_long_t unix_nr_socks;
static struct hlist_head *unix_sockets_unbound(void *addr)
{
	unsigned long hash = (unsigned long)addr;

	hash ^= hash >> 16;
	hash ^= hash >> 8;
	hash %= UNIX_HASH_SIZE;
	return &unix_socket_table[UNIX_HASH_SIZE + hash];
}
138 #define UNIX_ABSTRACT(sk) (unix_sk(sk)->addr->hash < UNIX_HASH_SIZE)
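/*
 * Layout note (derived from the code below): buckets [0, UNIX_HASH_SIZE)
 * hold bound sockets - abstract names hashed by name (xor'd with the
 * socket type), filesystem names by inode number - while buckets
 * [UNIX_HASH_SIZE, 2 * UNIX_HASH_SIZE) hold not-yet-bound sockets hashed
 * by socket pointer in unix_sockets_unbound(). UNIX_ABSTRACT() works
 * because FS-bound sockets keep the UNIX_HASH_SIZE sentinel in
 * addr->hash while abstract ones store a real hash below it.
 */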
140 #ifdef CONFIG_SECURITY_NETWORK
141 static void unix_get_secdata(struct scm_cookie *scm, struct sk_buff *skb)
143 UNIXCB(skb).secid = scm->secid;
146 static inline void unix_set_secdata(struct scm_cookie *scm, struct sk_buff *skb)
148 scm->secid = UNIXCB(skb).secid;
151 static inline bool unix_secdata_eq(struct scm_cookie *scm, struct sk_buff *skb)
153 return (scm->secid == UNIXCB(skb).secid);
156 static inline void unix_get_secdata(struct scm_cookie *scm, struct sk_buff *skb)
159 static inline void unix_set_secdata(struct scm_cookie *scm, struct sk_buff *skb)
162 static inline bool unix_secdata_eq(struct scm_cookie *scm, struct sk_buff *skb)
166 #endif /* CONFIG_SECURITY_NETWORK */
169 * SMP locking strategy:
170 * hash table is protected with spinlock unix_table_lock
171 * each socket state is protected by separate spin lock.
static inline unsigned int unix_hash_fold(__wsum n)
{
	unsigned int hash = (__force unsigned int)csum_fold(n);

	hash ^= hash >> 8;
	return hash & (UNIX_HASH_SIZE - 1);
}
182 #define unix_peer(sk) (unix_sk(sk)->peer)
184 static inline int unix_our_peer(struct sock *sk, struct sock *osk)
186 return unix_peer(osk) == sk;
189 static inline int unix_may_send(struct sock *sk, struct sock *osk)
191 return unix_peer(osk) == NULL || unix_our_peer(sk, osk);
194 static inline int unix_recvq_full(struct sock const *sk)
196 return skb_queue_len(&sk->sk_receive_queue) > sk->sk_max_ack_backlog;
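/*
 * Note: the backlog check above counts queued skbs, not bytes. For a
 * listener sk_max_ack_backlog is the accept() backlog; for a connected
 * dgram receiver it throttles senders, which may block in
 * unix_wait_for_peer() below until the queue drains.
 */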
199 struct sock *unix_peer_get(struct sock *s)
207 unix_state_unlock(s);
210 EXPORT_SYMBOL_GPL(unix_peer_get);
static inline void unix_release_addr(struct unix_address *addr)
{
	if (atomic_dec_and_test(&addr->refcnt))
		kfree(addr);
}
/*
 *	Check unix socket name:
 *		- should not be zero length.
 *		- if it does not start with a zero byte, it should be
 *		  NUL terminated (FS object)
 *		- if it starts with a zero byte, it is an abstract name.
 */

static int unix_mkname(struct sockaddr_un *sunaddr, int len, unsigned int *hashp)
{
	if (len <= sizeof(short) || len > sizeof(*sunaddr))
		return -EINVAL;
	if (!sunaddr || sunaddr->sun_family != AF_UNIX)
		return -EINVAL;
231 if (sunaddr->sun_path[0]) {
		/*
		 * This may look like an off-by-one error but it is a bit more
		 * subtle. 108 is the longest valid AF_UNIX path for a binding.
		 * sun_path[108] doesn't as such exist. However in kernel space
		 * we are guaranteed that it is a valid memory location in our
		 * kernel address buffer.
		 */
		((char *)sunaddr)[len] = 0;
		len = strlen(sunaddr->sun_path) + 1 + sizeof(short);
		return len;
	}

	*hashp = unix_hash_fold(csum_partial(sunaddr, len, 0));
	return len;
}
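/*
 * Worked example (illustrative): for an abstract bind of "\0demo" the
 * caller passes len = sizeof(short) + 5; sun_path[0] == 0 keeps us out
 * of the branch above and all len bytes are hashed. For a filesystem
 * bind of "/tmp/sock" the path is NUL-terminated in place and len is
 * recomputed from strlen(), so bytes past the string never matter.
 */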
248 static void __unix_remove_socket(struct sock *sk)
250 sk_del_node_init(sk);
253 static void __unix_insert_socket(struct hlist_head *list, struct sock *sk)
255 WARN_ON(!sk_unhashed(sk));
256 sk_add_node(sk, list);
259 static inline void unix_remove_socket(struct sock *sk)
261 spin_lock(&unix_table_lock);
262 __unix_remove_socket(sk);
263 spin_unlock(&unix_table_lock);
266 static inline void unix_insert_socket(struct hlist_head *list, struct sock *sk)
268 spin_lock(&unix_table_lock);
269 __unix_insert_socket(list, sk);
270 spin_unlock(&unix_table_lock);
273 static struct sock *__unix_find_socket_byname(struct net *net,
274 struct sockaddr_un *sunname,
275 int len, int type, unsigned int hash)
279 sk_for_each(s, &unix_socket_table[hash ^ type]) {
280 struct unix_sock *u = unix_sk(s);
282 if (!net_eq(sock_net(s), net))
285 if (u->addr->len == len &&
286 !memcmp(u->addr->name, sunname, len))
294 static inline struct sock *unix_find_socket_byname(struct net *net,
295 struct sockaddr_un *sunname,
301 spin_lock(&unix_table_lock);
302 s = __unix_find_socket_byname(net, sunname, len, type, hash);
305 spin_unlock(&unix_table_lock);
309 static struct sock *unix_find_socket_byinode(struct inode *i)
313 spin_lock(&unix_table_lock);
315 &unix_socket_table[i->i_ino & (UNIX_HASH_SIZE - 1)]) {
316 struct dentry *dentry = unix_sk(s)->path.dentry;
318 if (dentry && d_backing_inode(dentry) == i) {
325 spin_unlock(&unix_table_lock);
329 static int unix_writable(const struct sock *sk)
331 return sk->sk_state != TCP_LISTEN &&
332 (atomic_read(&sk->sk_wmem_alloc) << 2) <= sk->sk_sndbuf;
335 static void unix_write_space(struct sock *sk)
337 struct socket_wq *wq;
340 if (unix_writable(sk)) {
341 wq = rcu_dereference(sk->sk_wq);
342 if (wq_has_sleeper(wq))
343 wake_up_interruptible_sync_poll(&wq->wait,
344 POLLOUT | POLLWRNORM | POLLWRBAND);
345 sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
/* When a dgram socket disconnects (or changes its peer), we clear its
 * receive queue of packets that arrived from the previous peer. First,
 * this allows flow control based only on wmem_alloc; second, an sk
 * connected to a peer may receive messages only from that peer. */
354 static void unix_dgram_disconnected(struct sock *sk, struct sock *other)
356 if (!skb_queue_empty(&sk->sk_receive_queue)) {
357 skb_queue_purge(&sk->sk_receive_queue);
358 wake_up_interruptible_all(&unix_sk(sk)->peer_wait);
	/* If one link of a bidirectional dgram pipe is disconnected,
	 * we signal an error. Messages are lost. Do not do this when
	 * the peer was not connected to us.
	 */
364 if (!sock_flag(other, SOCK_DEAD) && unix_peer(other) == sk) {
365 other->sk_err = ECONNRESET;
366 other->sk_error_report(other);
371 static void unix_sock_destructor(struct sock *sk)
373 struct unix_sock *u = unix_sk(sk);
375 skb_queue_purge(&sk->sk_receive_queue);
377 WARN_ON(atomic_read(&sk->sk_wmem_alloc));
378 WARN_ON(!sk_unhashed(sk));
379 WARN_ON(sk->sk_socket);
380 if (!sock_flag(sk, SOCK_DEAD)) {
381 pr_info("Attempt to release alive unix socket: %p\n", sk);
386 unix_release_addr(u->addr);
388 atomic_long_dec(&unix_nr_socks);
390 sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
392 #ifdef UNIX_REFCNT_DEBUG
393 pr_debug("UNIX %p is destroyed, %ld are still alive.\n", sk,
394 atomic_long_read(&unix_nr_socks));
398 static void unix_release_sock(struct sock *sk, int embrion)
400 struct unix_sock *u = unix_sk(sk);
406 unix_remove_socket(sk);
411 sk->sk_shutdown = SHUTDOWN_MASK;
413 u->path.dentry = NULL;
415 state = sk->sk_state;
416 sk->sk_state = TCP_CLOSE;
417 unix_state_unlock(sk);
419 wake_up_interruptible_all(&u->peer_wait);
421 skpair = unix_peer(sk);
423 if (skpair != NULL) {
424 if (sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET) {
425 unix_state_lock(skpair);
427 skpair->sk_shutdown = SHUTDOWN_MASK;
428 if (!skb_queue_empty(&sk->sk_receive_queue) || embrion)
429 skpair->sk_err = ECONNRESET;
430 unix_state_unlock(skpair);
431 skpair->sk_state_change(skpair);
432 sk_wake_async(skpair, SOCK_WAKE_WAITD, POLL_HUP);
434 sock_put(skpair); /* It may now die */
435 unix_peer(sk) = NULL;
438 /* Try to flush out this socket. Throw out buffers at least */
440 while ((skb = skb_dequeue(&sk->sk_receive_queue)) != NULL) {
441 if (state == TCP_LISTEN)
442 unix_release_sock(skb->sk, 1);
443 /* passed fds are erased in the kfree_skb hook */
452 /* ---- Socket is dead now and most probably destroyed ---- */
	/*
	 * Fixme: BSD difference: In BSD all sockets connected to us get
	 *	  ECONNRESET and we die on the spot. In Linux we behave
	 *	  like files and pipes do and wait for the last
	 *	  dereference.
	 *
	 * Can't we simply set sock->err?
	 *
	 *	  What does the above comment talk about? --ANK(980817)
	 */
465 if (unix_tot_inflight)
466 unix_gc(); /* Garbage collect fds */
469 static void init_peercred(struct sock *sk)
471 put_pid(sk->sk_peer_pid);
472 if (sk->sk_peer_cred)
473 put_cred(sk->sk_peer_cred);
474 sk->sk_peer_pid = get_pid(task_tgid(current));
475 sk->sk_peer_cred = get_current_cred();
478 static void copy_peercred(struct sock *sk, struct sock *peersk)
480 put_pid(sk->sk_peer_pid);
481 if (sk->sk_peer_cred)
482 put_cred(sk->sk_peer_cred);
483 sk->sk_peer_pid = get_pid(peersk->sk_peer_pid);
484 sk->sk_peer_cred = get_cred(peersk->sk_peer_cred);
487 static int unix_listen(struct socket *sock, int backlog)
490 struct sock *sk = sock->sk;
491 struct unix_sock *u = unix_sk(sk);
492 struct pid *old_pid = NULL;
495 if (sock->type != SOCK_STREAM && sock->type != SOCK_SEQPACKET)
496 goto out; /* Only stream/seqpacket sockets accept */
499 goto out; /* No listens on an unbound socket */
501 if (sk->sk_state != TCP_CLOSE && sk->sk_state != TCP_LISTEN)
503 if (backlog > sk->sk_max_ack_backlog)
504 wake_up_interruptible_all(&u->peer_wait);
505 sk->sk_max_ack_backlog = backlog;
506 sk->sk_state = TCP_LISTEN;
507 /* set credentials so connect can copy them */
512 unix_state_unlock(sk);
518 static int unix_release(struct socket *);
519 static int unix_bind(struct socket *, struct sockaddr *, int);
520 static int unix_stream_connect(struct socket *, struct sockaddr *,
521 int addr_len, int flags);
522 static int unix_socketpair(struct socket *, struct socket *);
523 static int unix_accept(struct socket *, struct socket *, int);
524 static int unix_getname(struct socket *, struct sockaddr *, int *, int);
525 static unsigned int unix_poll(struct file *, struct socket *, poll_table *);
526 static unsigned int unix_dgram_poll(struct file *, struct socket *,
528 static int unix_ioctl(struct socket *, unsigned int, unsigned long);
529 static int unix_shutdown(struct socket *, int);
530 static int unix_stream_sendmsg(struct socket *, struct msghdr *, size_t);
531 static int unix_stream_recvmsg(struct socket *, struct msghdr *, size_t, int);
532 static ssize_t unix_stream_sendpage(struct socket *, struct page *, int offset,
533 size_t size, int flags);
534 static ssize_t unix_stream_splice_read(struct socket *, loff_t *ppos,
535 struct pipe_inode_info *, size_t size,
537 static int unix_dgram_sendmsg(struct socket *, struct msghdr *, size_t);
538 static int unix_dgram_recvmsg(struct socket *, struct msghdr *, size_t, int);
539 static int unix_dgram_connect(struct socket *, struct sockaddr *,
541 static int unix_seqpacket_sendmsg(struct socket *, struct msghdr *, size_t);
542 static int unix_seqpacket_recvmsg(struct socket *, struct msghdr *, size_t,
545 static int unix_set_peek_off(struct sock *sk, int val)
547 struct unix_sock *u = unix_sk(sk);
549 if (mutex_lock_interruptible(&u->readlock))
552 sk->sk_peek_off = val;
553 mutex_unlock(&u->readlock);
559 static const struct proto_ops unix_stream_ops = {
561 .owner = THIS_MODULE,
562 .release = unix_release,
564 .connect = unix_stream_connect,
565 .socketpair = unix_socketpair,
566 .accept = unix_accept,
567 .getname = unix_getname,
570 .listen = unix_listen,
571 .shutdown = unix_shutdown,
572 .setsockopt = sock_no_setsockopt,
573 .getsockopt = sock_no_getsockopt,
574 .sendmsg = unix_stream_sendmsg,
575 .recvmsg = unix_stream_recvmsg,
576 .mmap = sock_no_mmap,
577 .sendpage = unix_stream_sendpage,
578 .splice_read = unix_stream_splice_read,
579 .set_peek_off = unix_set_peek_off,
582 static const struct proto_ops unix_dgram_ops = {
584 .owner = THIS_MODULE,
585 .release = unix_release,
587 .connect = unix_dgram_connect,
588 .socketpair = unix_socketpair,
589 .accept = sock_no_accept,
590 .getname = unix_getname,
591 .poll = unix_dgram_poll,
593 .listen = sock_no_listen,
594 .shutdown = unix_shutdown,
595 .setsockopt = sock_no_setsockopt,
596 .getsockopt = sock_no_getsockopt,
597 .sendmsg = unix_dgram_sendmsg,
598 .recvmsg = unix_dgram_recvmsg,
599 .mmap = sock_no_mmap,
600 .sendpage = sock_no_sendpage,
601 .set_peek_off = unix_set_peek_off,
604 static const struct proto_ops unix_seqpacket_ops = {
606 .owner = THIS_MODULE,
607 .release = unix_release,
609 .connect = unix_stream_connect,
610 .socketpair = unix_socketpair,
611 .accept = unix_accept,
612 .getname = unix_getname,
613 .poll = unix_dgram_poll,
615 .listen = unix_listen,
616 .shutdown = unix_shutdown,
617 .setsockopt = sock_no_setsockopt,
618 .getsockopt = sock_no_getsockopt,
619 .sendmsg = unix_seqpacket_sendmsg,
620 .recvmsg = unix_seqpacket_recvmsg,
621 .mmap = sock_no_mmap,
622 .sendpage = sock_no_sendpage,
623 .set_peek_off = unix_set_peek_off,
626 static struct proto unix_proto = {
628 .owner = THIS_MODULE,
629 .obj_size = sizeof(struct unix_sock),
/*
 * AF_UNIX sockets do not interact with hardware, hence they
 * don't trigger interrupts - so it's safe for them to have
 * bh-unsafe locking for their sk_receive_queue.lock. Split off
 * this special lock-class by reinitializing the spinlock key:
 */
638 static struct lock_class_key af_unix_sk_receive_queue_lock_key;
640 static struct sock *unix_create1(struct net *net, struct socket *sock, int kern)
642 struct sock *sk = NULL;
645 atomic_long_inc(&unix_nr_socks);
646 if (atomic_long_read(&unix_nr_socks) > 2 * get_max_files())
649 sk = sk_alloc(net, PF_UNIX, GFP_KERNEL, &unix_proto, kern);
653 sock_init_data(sock, sk);
654 lockdep_set_class(&sk->sk_receive_queue.lock,
655 &af_unix_sk_receive_queue_lock_key);
657 sk->sk_write_space = unix_write_space;
658 sk->sk_max_ack_backlog = net->unx.sysctl_max_dgram_qlen;
659 sk->sk_destruct = unix_sock_destructor;
661 u->path.dentry = NULL;
663 spin_lock_init(&u->lock);
664 atomic_long_set(&u->inflight, 0);
665 INIT_LIST_HEAD(&u->link);
666 mutex_init(&u->readlock); /* single task reading lock */
667 init_waitqueue_head(&u->peer_wait);
668 unix_insert_socket(unix_sockets_unbound(sk), sk);
671 atomic_long_dec(&unix_nr_socks);
674 sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
680 static int unix_create(struct net *net, struct socket *sock, int protocol,
683 if (protocol && protocol != PF_UNIX)
684 return -EPROTONOSUPPORT;
686 sock->state = SS_UNCONNECTED;
688 switch (sock->type) {
690 sock->ops = &unix_stream_ops;
		/*
		 *	Believe it or not BSD has AF_UNIX, SOCK_RAW
		 *	though nothing uses it.
		 */
	case SOCK_RAW:
697 sock->type = SOCK_DGRAM;
699 sock->ops = &unix_dgram_ops;
702 sock->ops = &unix_seqpacket_ops;
705 return -ESOCKTNOSUPPORT;
708 return unix_create1(net, sock, kern) ? 0 : -ENOMEM;
711 static int unix_release(struct socket *sock)
713 struct sock *sk = sock->sk;
718 unix_release_sock(sk, 0);
724 static int unix_autobind(struct socket *sock)
726 struct sock *sk = sock->sk;
727 struct net *net = sock_net(sk);
728 struct unix_sock *u = unix_sk(sk);
729 static u32 ordernum = 1;
730 struct unix_address *addr;
732 unsigned int retries = 0;
734 err = mutex_lock_interruptible(&u->readlock);
743 addr = kzalloc(sizeof(*addr) + sizeof(short) + 16, GFP_KERNEL);
747 addr->name->sun_family = AF_UNIX;
748 atomic_set(&addr->refcnt, 1);
751 addr->len = sprintf(addr->name->sun_path+1, "%05x", ordernum) + 1 + sizeof(short);
752 addr->hash = unix_hash_fold(csum_partial(addr->name, addr->len, 0));
754 spin_lock(&unix_table_lock);
755 ordernum = (ordernum+1)&0xFFFFF;
757 if (__unix_find_socket_byname(net, addr->name, addr->len, sock->type,
759 spin_unlock(&unix_table_lock);
		/*
		 * __unix_find_socket_byname() may take a long time if many
		 * names are already in use.
		 */
		cond_resched();
		/* Give up if all names seem to be in use. */
		if (retries++ == 0xFFFFF) {
773 addr->hash ^= sk->sk_type;
775 __unix_remove_socket(sk);
777 __unix_insert_socket(&unix_socket_table[addr->hash], sk);
778 spin_unlock(&unix_table_lock);
781 out: mutex_unlock(&u->readlock);
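/*
 * An autobound socket thus ends up with an abstract name of a NUL byte
 * followed by five hex digits, e.g. "\0""00001" (tools usually render
 * this as @00001), giving at most 0x100000 names per ordernum wrap.
 */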
785 static struct sock *unix_find_other(struct net *net,
786 struct sockaddr_un *sunname, int len,
787 int type, unsigned int hash, int *error)
793 if (sunname->sun_path[0]) {
795 err = kern_path(sunname->sun_path, LOOKUP_FOLLOW, &path);
798 inode = d_backing_inode(path.dentry);
799 err = inode_permission(inode, MAY_WRITE);
804 if (!S_ISSOCK(inode->i_mode))
806 u = unix_find_socket_byinode(inode);
810 if (u->sk_type == type)
816 if (u->sk_type != type) {
822 u = unix_find_socket_byname(net, sunname, len, type, hash);
824 struct dentry *dentry;
825 dentry = unix_sk(u)->path.dentry;
827 touch_atime(&unix_sk(u)->path);
840 static int unix_mknod(const char *sun_path, umode_t mode, struct path *res)
842 struct dentry *dentry;
	/*
	 * Get the parent directory, calculate the hash for last
	 * component.
	 */
	dentry = kern_path_create(AT_FDCWD, sun_path, &path, 0);
850 err = PTR_ERR(dentry);
855 * All right, let's create it.
857 err = security_path_mknod(&path, dentry, mode, 0);
859 err = vfs_mknod(d_inode(path.dentry), dentry, mode, 0);
861 res->mnt = mntget(path.mnt);
862 res->dentry = dget(dentry);
865 done_path_create(&path, dentry);
869 static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
871 struct sock *sk = sock->sk;
872 struct net *net = sock_net(sk);
873 struct unix_sock *u = unix_sk(sk);
874 struct sockaddr_un *sunaddr = (struct sockaddr_un *)uaddr;
875 char *sun_path = sunaddr->sun_path;
878 struct unix_address *addr;
879 struct hlist_head *list;
882 if (sunaddr->sun_family != AF_UNIX)
885 if (addr_len == sizeof(short)) {
886 err = unix_autobind(sock);
890 err = unix_mkname(sunaddr, addr_len, &hash);
895 err = mutex_lock_interruptible(&u->readlock);
904 addr = kmalloc(sizeof(*addr)+addr_len, GFP_KERNEL);
908 memcpy(addr->name, sunaddr, addr_len);
909 addr->len = addr_len;
910 addr->hash = hash ^ sk->sk_type;
911 atomic_set(&addr->refcnt, 1);
915 umode_t mode = S_IFSOCK |
916 (SOCK_INODE(sock)->i_mode & ~current_umask());
917 err = unix_mknod(sun_path, mode, &path);
921 unix_release_addr(addr);
924 addr->hash = UNIX_HASH_SIZE;
925 hash = d_backing_inode(path.dentry)->i_ino & (UNIX_HASH_SIZE-1);
926 spin_lock(&unix_table_lock);
928 list = &unix_socket_table[hash];
930 spin_lock(&unix_table_lock);
932 if (__unix_find_socket_byname(net, sunaddr, addr_len,
933 sk->sk_type, hash)) {
934 unix_release_addr(addr);
938 list = &unix_socket_table[addr->hash];
942 __unix_remove_socket(sk);
944 __unix_insert_socket(list, sk);
947 spin_unlock(&unix_table_lock);
949 mutex_unlock(&u->readlock);
static void unix_state_double_lock(struct sock *sk1, struct sock *sk2)
{
	if (unlikely(sk1 == sk2) || !sk2) {
		unix_state_lock(sk1);
		return;
	}
	if (sk1 < sk2) {
		unix_state_lock(sk1);
		unix_state_lock_nested(sk2);
	} else {
		unix_state_lock(sk2);
		unix_state_lock_nested(sk1);
	}
}
static void unix_state_double_unlock(struct sock *sk1, struct sock *sk2)
{
	if (unlikely(sk1 == sk2) || !sk2) {
		unix_state_unlock(sk1);
		return;
	}
	unix_state_unlock(sk1);
	unix_state_unlock(sk2);
}
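/*
 * The double lock above takes the two state locks in pointer order
 * (lower address first), so two tasks connecting a pair of sockets in
 * opposite directions cannot deadlock; unix_state_lock_nested() tells
 * lockdep that the second acquisition of the same lock class is
 * intentional.
 */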
979 static int unix_dgram_connect(struct socket *sock, struct sockaddr *addr,
982 struct sock *sk = sock->sk;
983 struct net *net = sock_net(sk);
984 struct sockaddr_un *sunaddr = (struct sockaddr_un *)addr;
989 if (addr->sa_family != AF_UNSPEC) {
990 err = unix_mkname(sunaddr, alen, &hash);
995 if (test_bit(SOCK_PASSCRED, &sock->flags) &&
996 !unix_sk(sk)->addr && (err = unix_autobind(sock)) != 0)
1000 other = unix_find_other(net, sunaddr, alen, sock->type, hash, &err);
1004 unix_state_double_lock(sk, other);
1006 /* Apparently VFS overslept socket death. Retry. */
1007 if (sock_flag(other, SOCK_DEAD)) {
1008 unix_state_double_unlock(sk, other);
1014 if (!unix_may_send(sk, other))
1017 err = security_unix_may_send(sk->sk_socket, other->sk_socket);
		/*
		 *	1003.1g - breaking connected state with AF_UNSPEC
		 */
		other = NULL;
1026 unix_state_double_lock(sk, other);
1030 * If it was connected, reconnect.
1032 if (unix_peer(sk)) {
1033 struct sock *old_peer = unix_peer(sk);
1034 unix_peer(sk) = other;
1035 unix_state_double_unlock(sk, other);
1037 if (other != old_peer)
1038 unix_dgram_disconnected(sk, old_peer);
1041 unix_peer(sk) = other;
1042 unix_state_double_unlock(sk, other);
1047 unix_state_double_unlock(sk, other);
1053 static long unix_wait_for_peer(struct sock *other, long timeo)
1055 struct unix_sock *u = unix_sk(other);
1059 prepare_to_wait_exclusive(&u->peer_wait, &wait, TASK_INTERRUPTIBLE);
1061 sched = !sock_flag(other, SOCK_DEAD) &&
1062 !(other->sk_shutdown & RCV_SHUTDOWN) &&
1063 unix_recvq_full(other);
1065 unix_state_unlock(other);
1068 timeo = schedule_timeout(timeo);
1070 finish_wait(&u->peer_wait, &wait);
1074 static int unix_stream_connect(struct socket *sock, struct sockaddr *uaddr,
1075 int addr_len, int flags)
1077 struct sockaddr_un *sunaddr = (struct sockaddr_un *)uaddr;
1078 struct sock *sk = sock->sk;
1079 struct net *net = sock_net(sk);
1080 struct unix_sock *u = unix_sk(sk), *newu, *otheru;
1081 struct sock *newsk = NULL;
1082 struct sock *other = NULL;
1083 struct sk_buff *skb = NULL;
1089 err = unix_mkname(sunaddr, addr_len, &hash);
1094 if (test_bit(SOCK_PASSCRED, &sock->flags) && !u->addr &&
1095 (err = unix_autobind(sock)) != 0)
1098 timeo = sock_sndtimeo(sk, flags & O_NONBLOCK);
	/* First of all allocate resources.
	 * If we were to allocate them after the state is locked,
	 * we would have to recheck everything again in any case.
	 */
1107 /* create new sock for complete connection */
1108 newsk = unix_create1(sock_net(sk), NULL, 0);
1112 /* Allocate skb for sending to listening sock */
1113 skb = sock_wmalloc(newsk, 1, 0, GFP_KERNEL);
1118 /* Find listening sock. */
1119 other = unix_find_other(net, sunaddr, addr_len, sk->sk_type, hash, &err);
1123 /* Latch state of peer */
1124 unix_state_lock(other);
1126 /* Apparently VFS overslept socket death. Retry. */
1127 if (sock_flag(other, SOCK_DEAD)) {
1128 unix_state_unlock(other);
1133 err = -ECONNREFUSED;
1134 if (other->sk_state != TCP_LISTEN)
1136 if (other->sk_shutdown & RCV_SHUTDOWN)
1139 if (unix_recvq_full(other)) {
1144 timeo = unix_wait_for_peer(other, timeo);
1146 err = sock_intr_errno(timeo);
1147 if (signal_pending(current))
	/* This is a tricky place. We need to grab our state lock and cannot
	 * drop the lock on the peer. This is dangerous because a deadlock is
	 * possible. The connect-to-self case and simultaneous connect
	 * attempts are eliminated by checking the socket state: other is
	 * TCP_LISTEN, and if sk were TCP_LISTEN we would have checked it
	 * before attempting to grab the lock.
	 *
	 * Well, and we have to recheck the state after the socket is locked.
	 */
1168 /* This is ok... continue with connect */
1170 case TCP_ESTABLISHED:
1171 /* Socket is already connected */
1179 unix_state_lock_nested(sk);
1181 if (sk->sk_state != st) {
1182 unix_state_unlock(sk);
1183 unix_state_unlock(other);
1188 err = security_unix_stream_connect(sk, other, newsk);
1190 unix_state_unlock(sk);
	/* The way is open! Quickly set all the necessary fields... */
1197 unix_peer(newsk) = sk;
1198 newsk->sk_state = TCP_ESTABLISHED;
1199 newsk->sk_type = sk->sk_type;
1200 init_peercred(newsk);
1201 newu = unix_sk(newsk);
1202 RCU_INIT_POINTER(newsk->sk_wq, &newu->peer_wq);
1203 otheru = unix_sk(other);
	/* copy address information from listening to new sock */
1207 atomic_inc(&otheru->addr->refcnt);
1208 newu->addr = otheru->addr;
1210 if (otheru->path.dentry) {
1211 path_get(&otheru->path);
1212 newu->path = otheru->path;
1215 /* Set credentials */
1216 copy_peercred(sk, other);
1218 sock->state = SS_CONNECTED;
1219 sk->sk_state = TCP_ESTABLISHED;
1222 smp_mb__after_atomic(); /* sock_hold() does an atomic_inc() */
1223 unix_peer(sk) = newsk;
1225 unix_state_unlock(sk);
	/* take ten and send info to the listening sock */
1228 spin_lock(&other->sk_receive_queue.lock);
1229 __skb_queue_tail(&other->sk_receive_queue, skb);
1230 spin_unlock(&other->sk_receive_queue.lock);
1231 unix_state_unlock(other);
1232 other->sk_data_ready(other);
1238 unix_state_unlock(other);
1243 unix_release_sock(newsk, 0);
1249 static int unix_socketpair(struct socket *socka, struct socket *sockb)
1251 struct sock *ska = socka->sk, *skb = sockb->sk;
1253 /* Join our sockets back to back */
1256 unix_peer(ska) = skb;
1257 unix_peer(skb) = ska;
1261 if (ska->sk_type != SOCK_DGRAM) {
1262 ska->sk_state = TCP_ESTABLISHED;
1263 skb->sk_state = TCP_ESTABLISHED;
1264 socka->state = SS_CONNECTED;
1265 sockb->state = SS_CONNECTED;
1270 static void unix_sock_inherit_flags(const struct socket *old,
1273 if (test_bit(SOCK_PASSCRED, &old->flags))
1274 set_bit(SOCK_PASSCRED, &new->flags);
1275 if (test_bit(SOCK_PASSSEC, &old->flags))
1276 set_bit(SOCK_PASSSEC, &new->flags);
1279 static int unix_accept(struct socket *sock, struct socket *newsock, int flags)
1281 struct sock *sk = sock->sk;
1283 struct sk_buff *skb;
1287 if (sock->type != SOCK_STREAM && sock->type != SOCK_SEQPACKET)
1291 if (sk->sk_state != TCP_LISTEN)
	/* If socket state is TCP_LISTEN it cannot change (for now...),
	 * so no locks are necessary.
	 */
1298 skb = skb_recv_datagram(sk, 0, flags&O_NONBLOCK, &err);
1300 /* This means receive shutdown. */
1307 skb_free_datagram(sk, skb);
1308 wake_up_interruptible(&unix_sk(sk)->peer_wait);
1310 /* attach accepted sock to socket */
1311 unix_state_lock(tsk);
1312 newsock->state = SS_CONNECTED;
1313 unix_sock_inherit_flags(sock, newsock);
1314 sock_graft(tsk, newsock);
1315 unix_state_unlock(tsk);
1323 static int unix_getname(struct socket *sock, struct sockaddr *uaddr, int *uaddr_len, int peer)
1325 struct sock *sk = sock->sk;
1326 struct unix_sock *u;
1327 DECLARE_SOCKADDR(struct sockaddr_un *, sunaddr, uaddr);
1331 sk = unix_peer_get(sk);
1342 unix_state_lock(sk);
1344 sunaddr->sun_family = AF_UNIX;
1345 sunaddr->sun_path[0] = 0;
1346 *uaddr_len = sizeof(short);
1348 struct unix_address *addr = u->addr;
1350 *uaddr_len = addr->len;
1351 memcpy(sunaddr, addr->name, *uaddr_len);
1353 unix_state_unlock(sk);
1359 static void unix_detach_fds(struct scm_cookie *scm, struct sk_buff *skb)
1363 scm->fp = UNIXCB(skb).fp;
1364 UNIXCB(skb).fp = NULL;
1366 for (i = scm->fp->count-1; i >= 0; i--)
1367 unix_notinflight(scm->fp->fp[i]);
1370 static void unix_destruct_scm(struct sk_buff *skb)
1372 struct scm_cookie scm;
1373 memset(&scm, 0, sizeof(scm));
1374 scm.pid = UNIXCB(skb).pid;
1376 unix_detach_fds(&scm, skb);
	/* Alas, it calls VFS */
	/* So fscking what? fput() has been SMP-safe since last summer */
1384 #define MAX_RECURSION_LEVEL 4
1386 static int unix_attach_fds(struct scm_cookie *scm, struct sk_buff *skb)
1389 unsigned char max_level = 0;
1390 int unix_sock_count = 0;
1392 for (i = scm->fp->count - 1; i >= 0; i--) {
1393 struct sock *sk = unix_get_socket(scm->fp->fp[i]);
1397 max_level = max(max_level,
1398 unix_sk(sk)->recursion_level);
1401 if (unlikely(max_level > MAX_RECURSION_LEVEL))
1402 return -ETOOMANYREFS;
1405 * Need to duplicate file references for the sake of garbage
1406 * collection. Otherwise a socket in the fps might become a
1407 * candidate for GC while the skb is not yet queued.
1409 UNIXCB(skb).fp = scm_fp_dup(scm->fp);
1410 if (!UNIXCB(skb).fp)
1413 if (unix_sock_count) {
1414 for (i = scm->fp->count - 1; i >= 0; i--)
1415 unix_inflight(scm->fp->fp[i]);
1420 static int unix_scm_to_skb(struct scm_cookie *scm, struct sk_buff *skb, bool send_fds)
1424 UNIXCB(skb).pid = get_pid(scm->pid);
1425 UNIXCB(skb).uid = scm->creds.uid;
1426 UNIXCB(skb).gid = scm->creds.gid;
1427 UNIXCB(skb).fp = NULL;
1428 unix_get_secdata(scm, skb);
1429 if (scm->fp && send_fds)
1430 err = unix_attach_fds(scm, skb);
1432 skb->destructor = unix_destruct_scm;
/*
 * Some apps rely on write() giving SCM_CREDENTIALS.
 * We include credentials if the source or destination socket
 * asserted SOCK_PASSCRED.
 */
1441 static void maybe_add_creds(struct sk_buff *skb, const struct socket *sock,
1442 const struct sock *other)
1444 if (UNIXCB(skb).pid)
1446 if (test_bit(SOCK_PASSCRED, &sock->flags) ||
1447 !other->sk_socket ||
1448 test_bit(SOCK_PASSCRED, &other->sk_socket->flags)) {
1449 UNIXCB(skb).pid = get_pid(task_tgid(current));
1450 current_uid_gid(&UNIXCB(skb).uid, &UNIXCB(skb).gid);
1455 * Send AF_UNIX data.
1458 static int unix_dgram_sendmsg(struct socket *sock, struct msghdr *msg,
1461 struct sock *sk = sock->sk;
1462 struct net *net = sock_net(sk);
1463 struct unix_sock *u = unix_sk(sk);
1464 DECLARE_SOCKADDR(struct sockaddr_un *, sunaddr, msg->msg_name);
1465 struct sock *other = NULL;
1466 int namelen = 0; /* fake GCC */
1469 struct sk_buff *skb;
1471 struct scm_cookie scm;
1476 err = scm_send(sock, msg, &scm, false);
1481 if (msg->msg_flags&MSG_OOB)
1484 if (msg->msg_namelen) {
1485 err = unix_mkname(sunaddr, msg->msg_namelen, &hash);
1492 other = unix_peer_get(sk);
1497 if (test_bit(SOCK_PASSCRED, &sock->flags) && !u->addr
1498 && (err = unix_autobind(sock)) != 0)
1502 if (len > sk->sk_sndbuf - 32)
1505 if (len > SKB_MAX_ALLOC) {
1506 data_len = min_t(size_t,
1507 len - SKB_MAX_ALLOC,
1508 MAX_SKB_FRAGS * PAGE_SIZE);
1509 data_len = PAGE_ALIGN(data_len);
1511 BUILD_BUG_ON(SKB_MAX_ALLOC < PAGE_SIZE);
1514 skb = sock_alloc_send_pskb(sk, len - data_len, data_len,
1515 msg->msg_flags & MSG_DONTWAIT, &err,
1516 PAGE_ALLOC_COSTLY_ORDER);
1520 err = unix_scm_to_skb(&scm, skb, true);
1523 max_level = err + 1;
1525 skb_put(skb, len - data_len);
1526 skb->data_len = data_len;
1528 err = skb_copy_datagram_from_iter(skb, 0, &msg->msg_iter, len);
1532 timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);
1537 if (sunaddr == NULL)
1540 other = unix_find_other(net, sunaddr, namelen, sk->sk_type,
1546 if (sk_filter(other, skb) < 0) {
1547 /* Toss the packet but do not return any error to the sender */
1552 unix_state_lock(other);
1554 if (!unix_may_send(sk, other))
1557 if (sock_flag(other, SOCK_DEAD)) {
		/*
		 *	Check with 1003.1g - what should
		 *	datagram error
		 */
1562 unix_state_unlock(other);
1566 unix_state_lock(sk);
1567 if (unix_peer(sk) == other) {
1568 unix_peer(sk) = NULL;
1569 unix_state_unlock(sk);
1571 unix_dgram_disconnected(sk, other);
1573 err = -ECONNREFUSED;
1575 unix_state_unlock(sk);
1585 if (other->sk_shutdown & RCV_SHUTDOWN)
1588 if (sk->sk_type != SOCK_SEQPACKET) {
1589 err = security_unix_may_send(sk->sk_socket, other->sk_socket);
1594 if (unix_peer(other) != sk && unix_recvq_full(other)) {
1600 timeo = unix_wait_for_peer(other, timeo);
1602 err = sock_intr_errno(timeo);
1603 if (signal_pending(current))
1609 if (sock_flag(other, SOCK_RCVTSTAMP))
1610 __net_timestamp(skb);
1611 maybe_add_creds(skb, sock, other);
1612 skb_queue_tail(&other->sk_receive_queue, skb);
1613 if (max_level > unix_sk(other)->recursion_level)
1614 unix_sk(other)->recursion_level = max_level;
1615 unix_state_unlock(other);
1616 other->sk_data_ready(other);
1622 unix_state_unlock(other);
/* We use paged skbs for stream sockets, and limit occupancy to 32768
 * bytes, and a minimum of a full page.
 */
1635 #define UNIX_SKB_FRAGS_SZ (PAGE_SIZE << get_order(32768))
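/*
 * Arithmetic note: with 4 KiB pages get_order(32768) is 3, so this is
 * PAGE_SIZE << 3 = 32768 bytes of page frags per skb;
 * sock_alloc_send_pskb() below may still fall back to order-0 pages.
 */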
1637 static int unix_stream_sendmsg(struct socket *sock, struct msghdr *msg,
1640 struct sock *sk = sock->sk;
1641 struct sock *other = NULL;
1643 struct sk_buff *skb;
1645 struct scm_cookie scm;
1646 bool fds_sent = false;
1651 err = scm_send(sock, msg, &scm, false);
1656 if (msg->msg_flags&MSG_OOB)
1659 if (msg->msg_namelen) {
1660 err = sk->sk_state == TCP_ESTABLISHED ? -EISCONN : -EOPNOTSUPP;
1664 other = unix_peer(sk);
1669 if (sk->sk_shutdown & SEND_SHUTDOWN)
1672 while (sent < len) {
1675 /* Keep two messages in the pipe so it schedules better */
1676 size = min_t(int, size, (sk->sk_sndbuf >> 1) - 64);
1678 /* allow fallback to order-0 allocations */
1679 size = min_t(int, size, SKB_MAX_HEAD(0) + UNIX_SKB_FRAGS_SZ);
1681 data_len = max_t(int, 0, size - SKB_MAX_HEAD(0));
1683 data_len = min_t(size_t, size, PAGE_ALIGN(data_len));
1685 skb = sock_alloc_send_pskb(sk, size - data_len, data_len,
1686 msg->msg_flags & MSG_DONTWAIT, &err,
1687 get_order(UNIX_SKB_FRAGS_SZ));
1691 /* Only send the fds in the first buffer */
1692 err = unix_scm_to_skb(&scm, skb, !fds_sent);
1697 max_level = err + 1;
1700 skb_put(skb, size - data_len);
1701 skb->data_len = data_len;
1703 err = skb_copy_datagram_from_iter(skb, 0, &msg->msg_iter, size);
1709 unix_state_lock(other);
1711 if (sock_flag(other, SOCK_DEAD) ||
1712 (other->sk_shutdown & RCV_SHUTDOWN))
1715 maybe_add_creds(skb, sock, other);
1716 skb_queue_tail(&other->sk_receive_queue, skb);
1717 if (max_level > unix_sk(other)->recursion_level)
1718 unix_sk(other)->recursion_level = max_level;
1719 unix_state_unlock(other);
1720 other->sk_data_ready(other);
1729 unix_state_unlock(other);
1732 if (sent == 0 && !(msg->msg_flags&MSG_NOSIGNAL))
1733 send_sig(SIGPIPE, current, 0);
1737 return sent ? : err;
1740 static ssize_t unix_stream_sendpage(struct socket *socket, struct page *page,
1741 int offset, size_t size, int flags)
1744 bool send_sigpipe = true;
1745 struct sock *other, *sk = socket->sk;
1746 struct sk_buff *skb, *newskb = NULL, *tail = NULL;
1748 if (flags & MSG_OOB)
1751 other = unix_peer(sk);
1752 if (!other || sk->sk_state != TCP_ESTABLISHED)
1757 unix_state_unlock(other);
1758 mutex_unlock(&unix_sk(other)->readlock);
1759 newskb = sock_alloc_send_pskb(sk, 0, 0, flags & MSG_DONTWAIT,
1765 /* we must acquire readlock as we modify already present
1766 * skbs in the sk_receive_queue and mess with skb->len
1768 err = mutex_lock_interruptible(&unix_sk(other)->readlock);
1770 err = flags & MSG_DONTWAIT ? -EAGAIN : -ERESTARTSYS;
1771 send_sigpipe = false;
1775 if (sk->sk_shutdown & SEND_SHUTDOWN) {
1780 unix_state_lock(other);
1782 if (sock_flag(other, SOCK_DEAD) ||
1783 other->sk_shutdown & RCV_SHUTDOWN) {
1785 goto err_state_unlock;
1788 skb = skb_peek_tail(&other->sk_receive_queue);
1789 if (tail && tail == skb) {
1796 } else if (newskb) {
		/* this is the fast path; we don't necessarily need to
		 * call kfree_skb even though with newskb == NULL
		 * this does no harm
		 */
1801 consume_skb(newskb);
1804 if (skb_append_pagefrags(skb, page, offset, size)) {
1810 skb->data_len += size;
1811 skb->truesize += size;
1812 atomic_add(size, &sk->sk_wmem_alloc);
1815 __skb_queue_tail(&other->sk_receive_queue, newskb);
1817 unix_state_unlock(other);
1818 mutex_unlock(&unix_sk(other)->readlock);
1820 other->sk_data_ready(other);
1825 unix_state_unlock(other);
1827 mutex_unlock(&unix_sk(other)->readlock);
1830 if (send_sigpipe && !(flags & MSG_NOSIGNAL))
1831 send_sig(SIGPIPE, current, 0);
1835 static int unix_seqpacket_sendmsg(struct socket *sock, struct msghdr *msg,
1839 struct sock *sk = sock->sk;
1841 err = sock_error(sk);
1845 if (sk->sk_state != TCP_ESTABLISHED)
1848 if (msg->msg_namelen)
1849 msg->msg_namelen = 0;
1851 return unix_dgram_sendmsg(sock, msg, len);
1854 static int unix_seqpacket_recvmsg(struct socket *sock, struct msghdr *msg,
1855 size_t size, int flags)
1857 struct sock *sk = sock->sk;
1859 if (sk->sk_state != TCP_ESTABLISHED)
1862 return unix_dgram_recvmsg(sock, msg, size, flags);
1865 static void unix_copy_addr(struct msghdr *msg, struct sock *sk)
1867 struct unix_sock *u = unix_sk(sk);
1870 msg->msg_namelen = u->addr->len;
1871 memcpy(msg->msg_name, u->addr->name, u->addr->len);
1875 static int unix_dgram_recvmsg(struct socket *sock, struct msghdr *msg,
1876 size_t size, int flags)
1878 struct scm_cookie scm;
1879 struct sock *sk = sock->sk;
1880 struct unix_sock *u = unix_sk(sk);
1881 int noblock = flags & MSG_DONTWAIT;
1882 struct sk_buff *skb;
1890 err = mutex_lock_interruptible(&u->readlock);
1891 if (unlikely(err)) {
		/* recvmsg() in non-blocking mode is supposed to return
		 * -EAGAIN, but sk_rcvtimeo is not honored by
		 * mutex_lock_interruptible()
		 */
		err = noblock ? -EAGAIN : -ERESTARTSYS;
1899 skip = sk_peek_offset(sk, flags);
1901 skb = __skb_recv_datagram(sk, flags, &peeked, &skip, &err);
1903 unix_state_lock(sk);
1904 /* Signal EOF on disconnected non-blocking SEQPACKET socket. */
1905 if (sk->sk_type == SOCK_SEQPACKET && err == -EAGAIN &&
1906 (sk->sk_shutdown & RCV_SHUTDOWN))
1908 unix_state_unlock(sk);
1912 wake_up_interruptible_sync_poll(&u->peer_wait,
1913 POLLOUT | POLLWRNORM | POLLWRBAND);
1916 unix_copy_addr(msg, skb->sk);
1918 if (size > skb->len - skip)
1919 size = skb->len - skip;
1920 else if (size < skb->len - skip)
1921 msg->msg_flags |= MSG_TRUNC;
1923 err = skb_copy_datagram_msg(skb, skip, msg, size);
1927 if (sock_flag(sk, SOCK_RCVTSTAMP))
1928 __sock_recv_timestamp(msg, sk, skb);
1930 memset(&scm, 0, sizeof(scm));
1932 scm_set_cred(&scm, UNIXCB(skb).pid, UNIXCB(skb).uid, UNIXCB(skb).gid);
1933 unix_set_secdata(&scm, skb);
1935 if (!(flags & MSG_PEEK)) {
1937 unix_detach_fds(&scm, skb);
1939 sk_peek_offset_bwd(sk, skb->len);
		/* It is questionable: on PEEK we could:
		   - not return fds - good, but too simple 8)
		   - return fds, and not return them on read (old strategy,
		     apparently wrong)
		   - clone fds (I chose it for now, it is the most universal
		     solution)

		   POSIX 1003.1g does not actually define this clearly
		   at all. POSIX 1003.1g doesn't define a lot of things
		   clearly, however!
		*/
1954 sk_peek_offset_fwd(sk, size);
1957 scm.fp = scm_fp_dup(UNIXCB(skb).fp);
1959 err = (flags & MSG_TRUNC) ? skb->len - skip : size;
1961 scm_recv(sock, msg, &scm, flags);
1964 skb_free_datagram(sk, skb);
1966 mutex_unlock(&u->readlock);
1972 * Sleep until more data has arrived. But check for races..
1974 static long unix_stream_data_wait(struct sock *sk, long timeo,
1975 struct sk_buff *last, unsigned int last_len)
1977 struct sk_buff *tail;
1980 unix_state_lock(sk);
1983 prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
1985 tail = skb_peek_tail(&sk->sk_receive_queue);
1987 (tail && tail->len != last_len) ||
1989 (sk->sk_shutdown & RCV_SHUTDOWN) ||
1990 signal_pending(current) ||
1994 set_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
1995 unix_state_unlock(sk);
1996 timeo = freezable_schedule_timeout(timeo);
1997 unix_state_lock(sk);
1999 if (sock_flag(sk, SOCK_DEAD))
2002 clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
2005 finish_wait(sk_sleep(sk), &wait);
2006 unix_state_unlock(sk);
2010 static unsigned int unix_skb_len(const struct sk_buff *skb)
2012 return skb->len - UNIXCB(skb).consumed;
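/*
 * UNIXCB(skb).consumed counts the bytes of this skb already copied to
 * a reader, so a partially read skb can stay queued; unix_skb_len() is
 * what remains readable. Only non-PEEK reads advance it (see
 * unix_stream_read_generic() below).
 */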
2015 struct unix_stream_read_state {
2016 int (*recv_actor)(struct sk_buff *, int, int,
2017 struct unix_stream_read_state *);
2018 struct socket *socket;
2020 struct pipe_inode_info *pipe;
2023 unsigned int splice_flags;
2026 static int unix_stream_read_generic(struct unix_stream_read_state *state)
2028 struct scm_cookie scm;
2029 struct socket *sock = state->socket;
2030 struct sock *sk = sock->sk;
2031 struct unix_sock *u = unix_sk(sk);
2033 int flags = state->flags;
2034 int noblock = flags & MSG_DONTWAIT;
2035 bool check_creds = false;
2040 size_t size = state->size;
2041 unsigned int last_len;
2044 if (sk->sk_state != TCP_ESTABLISHED)
2048 if (flags & MSG_OOB)
2051 target = sock_rcvlowat(sk, flags & MSG_WAITALL, size);
2052 timeo = sock_rcvtimeo(sk, noblock);
2054 memset(&scm, 0, sizeof(scm));
	/* Lock the socket to prevent queue disordering
	 * while we sleep in memcpy_to_msg()
	 */
2059 err = mutex_lock_interruptible(&u->readlock);
2060 if (unlikely(err)) {
		/* recvmsg() in non-blocking mode is supposed to return
		 * -EAGAIN, but sk_rcvtimeo is not honored by
		 * mutex_lock_interruptible()
		 */
		err = noblock ? -EAGAIN : -ERESTARTSYS;
2068 if (flags & MSG_PEEK)
2069 skip = sk_peek_offset(sk, flags);
2075 struct sk_buff *skb, *last;
2077 unix_state_lock(sk);
2078 if (sock_flag(sk, SOCK_DEAD)) {
2082 last = skb = skb_peek(&sk->sk_receive_queue);
2083 last_len = last ? last->len : 0;
2086 unix_sk(sk)->recursion_level = 0;
2087 if (copied >= target)
2091 * POSIX 1003.1g mandates this order.
2094 err = sock_error(sk);
2097 if (sk->sk_shutdown & RCV_SHUTDOWN)
2100 unix_state_unlock(sk);
2104 mutex_unlock(&u->readlock);
2106 timeo = unix_stream_data_wait(sk, timeo, last,
2109 if (signal_pending(current) ||
2110 mutex_lock_interruptible(&u->readlock)) {
2111 err = sock_intr_errno(timeo);
2117 unix_state_unlock(sk);
2121 while (skip >= unix_skb_len(skb)) {
2122 skip -= unix_skb_len(skb);
2124 last_len = skb->len;
2125 skb = skb_peek_next(skb, &sk->sk_receive_queue);
2130 unix_state_unlock(sk);
2133 /* Never glue messages from different writers */
2134 if ((UNIXCB(skb).pid != scm.pid) ||
2135 !uid_eq(UNIXCB(skb).uid, scm.creds.uid) ||
2136 !gid_eq(UNIXCB(skb).gid, scm.creds.gid) ||
2137 !unix_secdata_eq(&scm, skb))
2139 } else if (test_bit(SOCK_PASSCRED, &sock->flags)) {
2140 /* Copy credentials */
2141 scm_set_cred(&scm, UNIXCB(skb).pid, UNIXCB(skb).uid, UNIXCB(skb).gid);
2142 unix_set_secdata(&scm, skb);
2146 /* Copy address just once */
2147 if (state->msg && state->msg->msg_name) {
2148 DECLARE_SOCKADDR(struct sockaddr_un *, sunaddr,
2149 state->msg->msg_name);
2150 unix_copy_addr(state->msg, skb->sk);
2154 chunk = min_t(unsigned int, unix_skb_len(skb) - skip, size);
2155 chunk = state->recv_actor(skb, skip, chunk, state);
2164 /* Mark read part of skb as used */
2165 if (!(flags & MSG_PEEK)) {
2166 UNIXCB(skb).consumed += chunk;
2168 sk_peek_offset_bwd(sk, chunk);
2171 unix_detach_fds(&scm, skb);
2173 if (unix_skb_len(skb))
2176 skb_unlink(skb, &sk->sk_receive_queue);
2182 /* It is questionable, see note in unix_dgram_recvmsg.
2185 scm.fp = scm_fp_dup(UNIXCB(skb).fp);
2187 sk_peek_offset_fwd(sk, chunk);
2194 last_len = skb->len;
2195 unix_state_lock(sk);
2196 skb = skb_peek_next(skb, &sk->sk_receive_queue);
2199 unix_state_unlock(sk);
2204 mutex_unlock(&u->readlock);
2206 scm_recv(sock, state->msg, &scm, flags);
2210 return copied ? : err;
2213 static int unix_stream_read_actor(struct sk_buff *skb,
2214 int skip, int chunk,
2215 struct unix_stream_read_state *state)
2219 ret = skb_copy_datagram_msg(skb, UNIXCB(skb).consumed + skip,
2221 return ret ?: chunk;
2224 static int unix_stream_recvmsg(struct socket *sock, struct msghdr *msg,
2225 size_t size, int flags)
2227 struct unix_stream_read_state state = {
2228 .recv_actor = unix_stream_read_actor,
2235 return unix_stream_read_generic(&state);
2238 static ssize_t skb_unix_socket_splice(struct sock *sk,
2239 struct pipe_inode_info *pipe,
2240 struct splice_pipe_desc *spd)
2243 struct unix_sock *u = unix_sk(sk);
2245 mutex_unlock(&u->readlock);
2246 ret = splice_to_pipe(pipe, spd);
2247 mutex_lock(&u->readlock);
2252 static int unix_stream_splice_actor(struct sk_buff *skb,
2253 int skip, int chunk,
2254 struct unix_stream_read_state *state)
2256 return skb_splice_bits(skb, state->socket->sk,
2257 UNIXCB(skb).consumed + skip,
2258 state->pipe, chunk, state->splice_flags,
2259 skb_unix_socket_splice);
2262 static ssize_t unix_stream_splice_read(struct socket *sock, loff_t *ppos,
2263 struct pipe_inode_info *pipe,
2264 size_t size, unsigned int flags)
2266 struct unix_stream_read_state state = {
2267 .recv_actor = unix_stream_splice_actor,
2271 .splice_flags = flags,
2274 if (unlikely(*ppos))
2277 if (sock->file->f_flags & O_NONBLOCK ||
2278 flags & SPLICE_F_NONBLOCK)
2279 state.flags = MSG_DONTWAIT;
2281 return unix_stream_read_generic(&state);
2284 static int unix_shutdown(struct socket *sock, int mode)
2286 struct sock *sk = sock->sk;
	if (mode < SHUT_RD || mode > SHUT_RDWR)
		return -EINVAL;
	/* This maps:
	 * SHUT_RD   (0) -> RCV_SHUTDOWN  (1)
	 * SHUT_WR   (1) -> SEND_SHUTDOWN (2)
	 * SHUT_RDWR (2) -> SHUTDOWN_MASK (3)
	 */
	++mode;
2298 unix_state_lock(sk);
2299 sk->sk_shutdown |= mode;
2300 other = unix_peer(sk);
2303 unix_state_unlock(sk);
2304 sk->sk_state_change(sk);
2307 (sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET)) {
2311 if (mode&RCV_SHUTDOWN)
2312 peer_mode |= SEND_SHUTDOWN;
2313 if (mode&SEND_SHUTDOWN)
2314 peer_mode |= RCV_SHUTDOWN;
2315 unix_state_lock(other);
2316 other->sk_shutdown |= peer_mode;
2317 unix_state_unlock(other);
2318 other->sk_state_change(other);
2319 if (peer_mode == SHUTDOWN_MASK)
2320 sk_wake_async(other, SOCK_WAKE_WAITD, POLL_HUP);
2321 else if (peer_mode & RCV_SHUTDOWN)
2322 sk_wake_async(other, SOCK_WAKE_WAITD, POLL_IN);
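/*
 * Example of the mapping above: shutdown(fd, SHUT_WR) on one end sets
 * SEND_SHUTDOWN locally and RCV_SHUTDOWN on the peer, so the peer's
 * reads return EOF and its poll() reports POLLRDHUP|POLLIN while its
 * own writes keep working.
 */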
2330 long unix_inq_len(struct sock *sk)
2332 struct sk_buff *skb;
2335 if (sk->sk_state == TCP_LISTEN)
2338 spin_lock(&sk->sk_receive_queue.lock);
2339 if (sk->sk_type == SOCK_STREAM ||
2340 sk->sk_type == SOCK_SEQPACKET) {
2341 skb_queue_walk(&sk->sk_receive_queue, skb)
2342 amount += unix_skb_len(skb);
2344 skb = skb_peek(&sk->sk_receive_queue);
2348 spin_unlock(&sk->sk_receive_queue.lock);
2352 EXPORT_SYMBOL_GPL(unix_inq_len);
2354 long unix_outq_len(struct sock *sk)
2356 return sk_wmem_alloc_get(sk);
2358 EXPORT_SYMBOL_GPL(unix_outq_len);
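/*
 * These helpers back the SIOCINQ/SIOCOUTQ cases of unix_ioctl() below.
 * Illustrative userspace use (an assumed sketch, not part of this file):
 *
 *	int unread;
 *	ioctl(fd, SIOCINQ, &unread);	// bytes still queued for reading
 */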
2360 static int unix_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
2362 struct sock *sk = sock->sk;
2368 amount = unix_outq_len(sk);
2369 err = put_user(amount, (int __user *)arg);
2372 amount = unix_inq_len(sk);
2376 err = put_user(amount, (int __user *)arg);
2385 static unsigned int unix_poll(struct file *file, struct socket *sock, poll_table *wait)
2387 struct sock *sk = sock->sk;
2390 sock_poll_wait(file, sk_sleep(sk), wait);
2393 /* exceptional events? */
2396 if (sk->sk_shutdown == SHUTDOWN_MASK)
2398 if (sk->sk_shutdown & RCV_SHUTDOWN)
2399 mask |= POLLRDHUP | POLLIN | POLLRDNORM;
2402 if (!skb_queue_empty(&sk->sk_receive_queue))
2403 mask |= POLLIN | POLLRDNORM;
	/* Connection-based sockets need to check for termination and startup */
2406 if ((sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET) &&
2407 sk->sk_state == TCP_CLOSE)
2411 * we set writable also when the other side has shut down the
2412 * connection. This prevents stuck sockets.
2414 if (unix_writable(sk))
2415 mask |= POLLOUT | POLLWRNORM | POLLWRBAND;
2420 static unsigned int unix_dgram_poll(struct file *file, struct socket *sock,
2423 struct sock *sk = sock->sk, *other;
2424 unsigned int mask, writable;
2426 sock_poll_wait(file, sk_sleep(sk), wait);
2429 /* exceptional events? */
2430 if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue))
2432 (sock_flag(sk, SOCK_SELECT_ERR_QUEUE) ? POLLPRI : 0);
2434 if (sk->sk_shutdown & RCV_SHUTDOWN)
2435 mask |= POLLRDHUP | POLLIN | POLLRDNORM;
2436 if (sk->sk_shutdown == SHUTDOWN_MASK)
2440 if (!skb_queue_empty(&sk->sk_receive_queue))
2441 mask |= POLLIN | POLLRDNORM;
	/* Connection-based sockets need to check for termination and startup */
2444 if (sk->sk_type == SOCK_SEQPACKET) {
2445 if (sk->sk_state == TCP_CLOSE)
2447 /* connection hasn't started yet? */
2448 if (sk->sk_state == TCP_SYN_SENT)
2452 /* No write status requested, avoid expensive OUT tests. */
2453 if (!(poll_requested_events(wait) & (POLLWRBAND|POLLWRNORM|POLLOUT)))
2456 writable = unix_writable(sk);
2457 other = unix_peer_get(sk);
2459 if (unix_peer(other) != sk) {
2460 sock_poll_wait(file, &unix_sk(other)->peer_wait, wait);
2461 if (unix_recvq_full(other))
2468 mask |= POLLOUT | POLLWRNORM | POLLWRBAND;
2470 set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
2475 #ifdef CONFIG_PROC_FS
2477 #define BUCKET_SPACE (BITS_PER_LONG - (UNIX_HASH_BITS + 1) - 1)
2479 #define get_bucket(x) ((x) >> BUCKET_SPACE)
2480 #define get_offset(x) ((x) & ((1L << BUCKET_SPACE) - 1))
2481 #define set_bucket_offset(b, o) ((b) << BUCKET_SPACE | (o))
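/*
 * The seq_file position packs a bucket index into the high bits and a
 * 1-based offset within that bucket into the low BUCKET_SPACE bits;
 * e.g. set_bucket_offset(2, 1) resumes at the first socket of bucket 2,
 * and position 0 is reserved for SEQ_START_TOKEN.
 */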
2483 static struct sock *unix_from_bucket(struct seq_file *seq, loff_t *pos)
2485 unsigned long offset = get_offset(*pos);
2486 unsigned long bucket = get_bucket(*pos);
2488 unsigned long count = 0;
2490 for (sk = sk_head(&unix_socket_table[bucket]); sk; sk = sk_next(sk)) {
2491 if (sock_net(sk) != seq_file_net(seq))
2493 if (++count == offset)
2500 static struct sock *unix_next_socket(struct seq_file *seq,
2504 unsigned long bucket;
2506 while (sk > (struct sock *)SEQ_START_TOKEN) {
2510 if (sock_net(sk) == seq_file_net(seq))
2515 sk = unix_from_bucket(seq, pos);
2520 bucket = get_bucket(*pos) + 1;
2521 *pos = set_bucket_offset(bucket, 1);
2522 } while (bucket < ARRAY_SIZE(unix_socket_table));
2527 static void *unix_seq_start(struct seq_file *seq, loff_t *pos)
2528 __acquires(unix_table_lock)
2530 spin_lock(&unix_table_lock);
2533 return SEQ_START_TOKEN;
2535 if (get_bucket(*pos) >= ARRAY_SIZE(unix_socket_table))
2538 return unix_next_socket(seq, NULL, pos);
2541 static void *unix_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2544 return unix_next_socket(seq, v, pos);
2547 static void unix_seq_stop(struct seq_file *seq, void *v)
2548 __releases(unix_table_lock)
2550 spin_unlock(&unix_table_lock);
2553 static int unix_seq_show(struct seq_file *seq, void *v)
2556 if (v == SEQ_START_TOKEN)
2557 seq_puts(seq, "Num RefCount Protocol Flags Type St "
2561 struct unix_sock *u = unix_sk(s);
2564 seq_printf(seq, "%pK: %08X %08X %08X %04X %02X %5lu",
2566 atomic_read(&s->sk_refcnt),
2568 s->sk_state == TCP_LISTEN ? __SO_ACCEPTCON : 0,
2571 (s->sk_state == TCP_ESTABLISHED ? SS_CONNECTED : SS_UNCONNECTED) :
2572 (s->sk_state == TCP_ESTABLISHED ? SS_CONNECTING : SS_DISCONNECTING),
2580 len = u->addr->len - sizeof(short);
2581 if (!UNIX_ABSTRACT(s))
2587 for ( ; i < len; i++)
2588 seq_putc(seq, u->addr->name->sun_path[i]);
2590 unix_state_unlock(s);
2591 seq_putc(seq, '\n');
2597 static const struct seq_operations unix_seq_ops = {
2598 .start = unix_seq_start,
2599 .next = unix_seq_next,
2600 .stop = unix_seq_stop,
2601 .show = unix_seq_show,
2604 static int unix_seq_open(struct inode *inode, struct file *file)
2606 return seq_open_net(inode, file, &unix_seq_ops,
2607 sizeof(struct seq_net_private));
2610 static const struct file_operations unix_seq_fops = {
2611 .owner = THIS_MODULE,
2612 .open = unix_seq_open,
2614 .llseek = seq_lseek,
2615 .release = seq_release_net,
2620 static const struct net_proto_family unix_family_ops = {
2622 .create = unix_create,
2623 .owner = THIS_MODULE,
2627 static int __net_init unix_net_init(struct net *net)
2629 int error = -ENOMEM;
2631 net->unx.sysctl_max_dgram_qlen = 10;
2632 if (unix_sysctl_register(net))
2635 #ifdef CONFIG_PROC_FS
2636 if (!proc_create("unix", 0, net->proc_net, &unix_seq_fops)) {
2637 unix_sysctl_unregister(net);
2646 static void __net_exit unix_net_exit(struct net *net)
2648 unix_sysctl_unregister(net);
2649 remove_proc_entry("unix", net->proc_net);
2652 static struct pernet_operations unix_net_ops = {
2653 .init = unix_net_init,
2654 .exit = unix_net_exit,
2657 static int __init af_unix_init(void)
2661 BUILD_BUG_ON(sizeof(struct unix_skb_parms) > FIELD_SIZEOF(struct sk_buff, cb));
2663 rc = proto_register(&unix_proto, 1);
2665 pr_crit("%s: Cannot create unix_sock SLAB cache!\n", __func__);
2669 sock_register(&unix_family_ops);
2670 register_pernet_subsys(&unix_net_ops);
2675 static void __exit af_unix_exit(void)
2677 sock_unregister(PF_UNIX);
2678 proto_unregister(&unix_proto);
2679 unregister_pernet_subsys(&unix_net_ops);
2682 /* Earlier than device_initcall() so that other drivers invoking
2683 request_module() don't end up in a loop when modprobe tries
2684 to use a UNIX socket. But later than subsys_initcall() because
2685 we depend on stuff initialised there */
2686 fs_initcall(af_unix_init);
2687 module_exit(af_unix_exit);
2689 MODULE_LICENSE("GPL");
2690 MODULE_ALIAS_NETPROTO(PF_UNIX);