/*
 *  linux/fs/pipe.c
 *
 *  Copyright (C) 1991, 1992, 1999  Linus Torvalds
 */

#include <linux/mm.h>
#include <linux/file.h>
#include <linux/poll.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/log2.h>
#include <linux/mount.h>
#include <linux/magic.h>
#include <linux/pipe_fs_i.h>
#include <linux/uio.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/audit.h>
#include <linux/syscalls.h>
#include <linux/fcntl.h>
#include <linux/aio.h>

#include <asm/uaccess.h>
#include <asm/ioctls.h>

#include "internal.h"
/*
 * The max size that a non-root user is allowed to grow the pipe. Can
 * be set by root in /proc/sys/fs/pipe-max-size
 */
unsigned int pipe_max_size = 1048576;

/*
 * Minimum pipe size, as required by POSIX
 */
unsigned int pipe_min_size = PAGE_SIZE;
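
/*
 * Illustrative note (not part of the original file): pipe_max_size is the
 * fs.pipe-max-size sysctl. A privileged administrator could raise the
 * per-user cap to 4 MiB with
 *
 *	# sysctl -w fs.pipe-max-size=4194304
 *
 * after which unprivileged F_SETPIPE_SZ requests up to that size succeed
 * (see pipe_fcntl() below).
 */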
/*
 * We use a start+len construction, which provides full use of the
 * allocated memory.
 * -- Florian Coosmann (FGC)
 *
 * Reads with count = 0 should always return 0.
 * -- Julian Bradfield 1999-06-07.
 *
 * FIFOs and Pipes now generate SIGIO for both readers and writers.
 * -- Jeremy Elson <jelson@circlemud.org> 2001-08-16
 *
 * pipe_read & write cleanup
 * -- Manfred Spraul <manfred@colorfullife.com> 2002-05-09
 */
static void pipe_lock_nested(struct pipe_inode_info *pipe, int subclass)
{
	if (pipe->files)
		mutex_lock_nested(&pipe->mutex, subclass);
}

void pipe_lock(struct pipe_inode_info *pipe)
{
	/*
	 * pipe_lock() nests non-pipe inode locks (for writing to a file)
	 */
	pipe_lock_nested(pipe, I_MUTEX_PARENT);
}
EXPORT_SYMBOL(pipe_lock);

void pipe_unlock(struct pipe_inode_info *pipe)
{
	if (pipe->files)
		mutex_unlock(&pipe->mutex);
}
EXPORT_SYMBOL(pipe_unlock);
static inline void __pipe_lock(struct pipe_inode_info *pipe)
{
	mutex_lock_nested(&pipe->mutex, I_MUTEX_PARENT);
}

static inline void __pipe_unlock(struct pipe_inode_info *pipe)
{
	mutex_unlock(&pipe->mutex);
}
void pipe_double_lock(struct pipe_inode_info *pipe1,
		      struct pipe_inode_info *pipe2)
{
	BUG_ON(pipe1 == pipe2);

	if (pipe1 < pipe2) {
		pipe_lock_nested(pipe1, I_MUTEX_PARENT);
		pipe_lock_nested(pipe2, I_MUTEX_CHILD);
	} else {
		pipe_lock_nested(pipe2, I_MUTEX_PARENT);
		pipe_lock_nested(pipe1, I_MUTEX_CHILD);
	}
}
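
/*
 * Illustrative sketch (not part of the original file): callers that must
 * hold two pipe locks at once, such as the splice/tee paths, rely on
 * pipe_double_lock() picking a consistent order by comparing the pipe
 * pointers, so two tasks locking the same pair cannot deadlock:
 *
 *	pipe_double_lock(ipipe, opipe);
 *	... move or duplicate buffers between ipipe and opipe ...
 *	pipe_unlock(ipipe);
 *	pipe_unlock(opipe);
 */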
/* Drop the inode semaphore and wait for a pipe event, atomically */
void pipe_wait(struct pipe_inode_info *pipe)
{
	DEFINE_WAIT(wait);

	/*
	 * Pipes are system-local resources, so sleeping on them
	 * is considered a noninteractive wait:
	 */
	prepare_to_wait(&pipe->wait, &wait, TASK_INTERRUPTIBLE);
	pipe_unlock(pipe);
	schedule();
	finish_wait(&pipe->wait, &wait);
	pipe_lock(pipe);
}
static int
pipe_iov_copy_from_user(void *to, struct iovec *iov, unsigned long len,
			int atomic)
{
	unsigned long copy;

	while (len > 0) {
		while (!iov->iov_len)
			iov++;
		copy = min_t(unsigned long, len, iov->iov_len);

		if (atomic) {
			if (__copy_from_user_inatomic(to, iov->iov_base, copy))
				return -EFAULT;
		} else {
			if (copy_from_user(to, iov->iov_base, copy))
				return -EFAULT;
		}
		to += copy;
		len -= copy;
		iov->iov_base += copy;
		iov->iov_len -= copy;
	}
	return 0;
}
/*
 * Pre-fault in the user memory, so we can use atomic copies.
 */
static void iov_fault_in_pages_read(struct iovec *iov, unsigned long len)
{
	while (!iov->iov_len)
		iov++;

	while (len > 0) {
		unsigned long this_len;

		this_len = min_t(unsigned long, len, iov->iov_len);
		fault_in_pages_readable(iov->iov_base, this_len);
		len -= this_len;
		iov++;
	}
}
static void anon_pipe_buf_release(struct pipe_inode_info *pipe,
				  struct pipe_buffer *buf)
{
	struct page *page = buf->page;

	/*
	 * If nobody else uses this page, and we don't already have a
	 * temporary page, let's keep track of it as a one-deep
	 * allocation cache. (Otherwise just release our reference to it)
	 */
	if (page_count(page) == 1 && !pipe->tmp_page)
		pipe->tmp_page = page;
	else
		page_cache_release(page);
}
/**
 * generic_pipe_buf_steal - attempt to take ownership of a &pipe_buffer
 * @pipe:	the pipe that the buffer belongs to
 * @buf:	the buffer to attempt to steal
 *
 * Description:
 *	This function attempts to steal the &struct page attached to
 *	@buf. If successful, this function returns 0 and returns with
 *	the page locked. The caller may then reuse the page for whatever
 *	he wishes; the typical use is insertion into a different file
 *	page cache.
 */
int generic_pipe_buf_steal(struct pipe_inode_info *pipe,
			   struct pipe_buffer *buf)
{
	struct page *page = buf->page;

	/*
	 * A reference of one is golden, that means that the owner of this
	 * page is the only one holding a reference to it. lock the page
	 * and return OK.
	 */
	if (page_count(page) == 1) {
		lock_page(page);
		return 0;
	}

	return 1;
}
EXPORT_SYMBOL(generic_pipe_buf_steal);
/**
 * generic_pipe_buf_get - get a reference to a &struct pipe_buffer
 * @pipe:	the pipe that the buffer belongs to
 * @buf:	the buffer to get a reference to
 *
 * Description:
 *	This function grabs an extra reference to @buf. It's used in
 *	the tee() system call, when we duplicate the buffers in one
 *	pipe into another.
 */
void generic_pipe_buf_get(struct pipe_inode_info *pipe, struct pipe_buffer *buf)
{
	page_cache_get(buf->page);
}
EXPORT_SYMBOL(generic_pipe_buf_get);
/**
 * generic_pipe_buf_confirm - verify contents of the pipe buffer
 * @info:	the pipe that the buffer belongs to
 * @buf:	the buffer to confirm
 *
 * Description:
 *	This function does nothing, because the generic pipe code uses
 *	pages that are always good when inserted into the pipe.
 */
int generic_pipe_buf_confirm(struct pipe_inode_info *info,
			     struct pipe_buffer *buf)
{
	return 0;
}
EXPORT_SYMBOL(generic_pipe_buf_confirm);
/**
 * generic_pipe_buf_release - put a reference to a &struct pipe_buffer
 * @pipe:	the pipe that the buffer belongs to
 * @buf:	the buffer to put a reference to
 *
 * Description:
 *	This function releases a reference to @buf.
 */
void generic_pipe_buf_release(struct pipe_inode_info *pipe,
			      struct pipe_buffer *buf)
{
	page_cache_release(buf->page);
}
EXPORT_SYMBOL(generic_pipe_buf_release);
static const struct pipe_buf_operations anon_pipe_buf_ops = {
	.can_merge = 1,
	.confirm = generic_pipe_buf_confirm,
	.release = anon_pipe_buf_release,
	.steal = generic_pipe_buf_steal,
	.get = generic_pipe_buf_get,
};

static const struct pipe_buf_operations packet_pipe_buf_ops = {
	.can_merge = 0,
	.confirm = generic_pipe_buf_confirm,
	.release = anon_pipe_buf_release,
	.steal = generic_pipe_buf_steal,
	.get = generic_pipe_buf_get,
};
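
/*
 * Illustrative note (not in the original file): .can_merge is what
 * separates the two variants. With anon_pipe_buf_ops (can_merge = 1),
 * two back-to-back write(fd, ..., 100) calls are appended into the same
 * page, so a later read() can return all 200 bytes from one buffer.
 * With packet_pipe_buf_ops (can_merge = 0), each write() stays its own
 * packet -- see the merge logic in pipe_write() below.
 */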
static ssize_t
pipe_read(struct kiocb *iocb, struct iov_iter *to)
{
	size_t total_len = iov_iter_count(to);
	struct file *filp = iocb->ki_filp;
	struct pipe_inode_info *pipe = filp->private_data;
	int do_wakeup;
	ssize_t ret;

	/* Null read succeeds. */
	if (unlikely(total_len == 0))
		return 0;

	do_wakeup = 0;
	ret = 0;
	__pipe_lock(pipe);
	for (;;) {
		int bufs = pipe->nrbufs;
		if (bufs) {
			int curbuf = pipe->curbuf;
			struct pipe_buffer *buf = pipe->bufs + curbuf;
			const struct pipe_buf_operations *ops = buf->ops;
			size_t chars = buf->len;
			size_t written;
			int error;

			if (chars > total_len)
				chars = total_len;

			error = ops->confirm(pipe, buf);
			if (error) {
				if (!ret)
					ret = error;
				break;
			}

			written = copy_page_to_iter(buf->page, buf->offset, chars, to);
			if (unlikely(written < chars)) {
				if (!ret)
					ret = -EFAULT;
				break;
			}
			ret += chars;
			buf->offset += chars;
			buf->len -= chars;

			/* Was it a packet buffer? Clean up and exit */
			if (buf->flags & PIPE_BUF_FLAG_PACKET) {
				total_len = chars;
				buf->len = 0;
			}

			if (!buf->len) {
				buf->ops = NULL;
				ops->release(pipe, buf);
				curbuf = (curbuf + 1) & (pipe->buffers - 1);
				pipe->curbuf = curbuf;
				pipe->nrbufs = --bufs;
				do_wakeup = 1;
			}
			total_len -= chars;
			if (!total_len)
				break;	/* common path: read succeeded */
		}
		if (bufs)	/* More to do? */
			continue;
		if (!pipe->writers)
			break;
		if (!pipe->waiting_writers) {
			/* syscall merging: Usually we must not sleep
			 * if O_NONBLOCK is set, or if we got some data.
			 * But if a writer sleeps in kernel space, then
			 * we can wait for that data without violating POSIX.
			 */
			if (ret)
				break;
			if (filp->f_flags & O_NONBLOCK) {
				ret = -EAGAIN;
				break;
			}
		}
		if (signal_pending(current)) {
			if (!ret)
				ret = -ERESTARTSYS;
			break;
		}
		if (do_wakeup) {
			wake_up_interruptible_sync_poll(&pipe->wait, POLLOUT | POLLWRNORM);
			kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
		}
		pipe_wait(pipe);
	}
	__pipe_unlock(pipe);

	/* Signal writers asynchronously that there is more room. */
	if (do_wakeup) {
		wake_up_interruptible_sync_poll(&pipe->wait, POLLOUT | POLLWRNORM);
		kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
	}
	if (ret > 0)
		file_accessed(filp);
	return ret;
}
static inline int is_packetized(struct file *file)
{
	return (file->f_flags & O_DIRECT) != 0;
}
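
/*
 * Illustrative note (not in the original file): a pipe created with
 * pipe2(fds, O_DIRECT) operates in "packet" mode. Each write() becomes
 * one packet buffer, and a read() returns at most one packet, discarding
 * any unread remainder -- see the PIPE_BUF_FLAG_PACKET handling in
 * pipe_read() above. A hedged userspace sketch:
 *
 *	int fds[2];
 *	if (pipe2(fds, O_DIRECT) == 0) {
 *		char buf[4];
 *		write(fds[1], "abcdef", 6);	// one 6-byte packet
 *		read(fds[0], buf, 4);		// returns 4; "ef" is dropped
 *	}
 */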
static ssize_t
pipe_write(struct kiocb *iocb, const struct iovec *_iov,
	   unsigned long nr_segs, loff_t ppos)
{
	struct file *filp = iocb->ki_filp;
	struct pipe_inode_info *pipe = filp->private_data;
	ssize_t ret;
	int do_wakeup;
	struct iovec *iov = (struct iovec *)_iov;
	size_t total_len;
	ssize_t chars;

	total_len = iov_length(iov, nr_segs);
	/* Null write succeeds. */
	if (unlikely(total_len == 0))
		return 0;

	do_wakeup = 0;
	ret = 0;
	__pipe_lock(pipe);

	if (!pipe->readers) {
		send_sig(SIGPIPE, current, 0);
		ret = -EPIPE;
		goto out;
	}

	/* We try to merge small writes */
	chars = total_len & (PAGE_SIZE-1); /* size of the last buffer */
	if (pipe->nrbufs && chars != 0) {
		int lastbuf = (pipe->curbuf + pipe->nrbufs - 1) &
							(pipe->buffers - 1);
		struct pipe_buffer *buf = pipe->bufs + lastbuf;
		const struct pipe_buf_operations *ops = buf->ops;
		int offset = buf->offset + buf->len;

		if (ops->can_merge && offset + chars <= PAGE_SIZE) {
			int error, atomic = 1;
			void *addr;

			error = ops->confirm(pipe, buf);
			if (error)
				goto out;

			iov_fault_in_pages_read(iov, chars);
redo1:
			if (atomic)
				addr = kmap_atomic(buf->page);
			else
				addr = kmap(buf->page);
			error = pipe_iov_copy_from_user(offset + addr, iov,
							chars, atomic);
			if (atomic)
				kunmap_atomic(addr);
			else
				kunmap(buf->page);
			ret = error;
			do_wakeup = 1;
			if (error) {
				if (atomic) {
					atomic = 0;
					goto redo1;
				}
				goto out;
			}
			buf->len += chars;
			total_len -= chars;
			ret = chars;
			if (!total_len)
				goto out;
		}
	}

	for (;;) {
		int bufs;

		if (!pipe->readers) {
			send_sig(SIGPIPE, current, 0);
			if (!ret)
				ret = -EPIPE;
			break;
		}
		bufs = pipe->nrbufs;
		if (bufs < pipe->buffers) {
			int newbuf = (pipe->curbuf + bufs) & (pipe->buffers-1);
			struct pipe_buffer *buf = pipe->bufs + newbuf;
			struct page *page = pipe->tmp_page;
			char *src;
			int error, atomic = 1;

			if (!page) {
				page = alloc_page(GFP_HIGHUSER);
				if (unlikely(!page)) {
					ret = ret ? : -ENOMEM;
					break;
				}
				pipe->tmp_page = page;
			}
			/* Always wake up, even if the copy fails. Otherwise
			 * we lock up (O_NONBLOCK-)readers that sleep due to
			 * syscall merging.
			 * FIXME! Is this really true?
			 */
			do_wakeup = 1;
			chars = PAGE_SIZE;
			if (chars > total_len)
				chars = total_len;

			iov_fault_in_pages_read(iov, chars);
redo2:
			if (atomic)
				src = kmap_atomic(page);
			else
				src = kmap(page);

			error = pipe_iov_copy_from_user(src, iov, chars,
							atomic);
			if (atomic)
				kunmap_atomic(src);
			else
				kunmap(page);

			if (unlikely(error)) {
				if (atomic) {
					atomic = 0;
					goto redo2;
				}
				if (!ret)
					ret = error;
				break;
			}
			ret += chars;

			/* Insert it into the buffer array */
			buf->page = page;
			buf->ops = &anon_pipe_buf_ops;
			buf->offset = 0;
			buf->len = chars;
			buf->flags = 0;
			if (is_packetized(filp)) {
				buf->ops = &packet_pipe_buf_ops;
				buf->flags = PIPE_BUF_FLAG_PACKET;
			}
			pipe->nrbufs = ++bufs;
			pipe->tmp_page = NULL;

			total_len -= chars;
			if (!total_len)
				break;
		}
		if (bufs < pipe->buffers)
			continue;
		if (filp->f_flags & O_NONBLOCK) {
			if (!ret)
				ret = -EAGAIN;
			break;
		}
		if (signal_pending(current)) {
			if (!ret)
				ret = -ERESTARTSYS;
			break;
		}
		if (do_wakeup) {
			wake_up_interruptible_sync_poll(&pipe->wait, POLLIN | POLLRDNORM);
			kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
			do_wakeup = 0;
		}
		pipe->waiting_writers++;
		pipe_wait(pipe);
		pipe->waiting_writers--;
	}
out:
	__pipe_unlock(pipe);
	if (do_wakeup) {
		wake_up_interruptible_sync_poll(&pipe->wait, POLLIN | POLLRDNORM);
		kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
	}
	if (ret > 0 && sb_start_write_trylock(file_inode(filp)->i_sb)) {
		int err = file_update_time(filp);
		if (err)
			ret = err;
		sb_end_write(file_inode(filp)->i_sb);
	}
	return ret;
}
static long pipe_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	struct pipe_inode_info *pipe = filp->private_data;
	int count, buf, nrbufs;

	switch (cmd) {
		case FIONREAD:
			__pipe_lock(pipe);
			count = 0;
			buf = pipe->curbuf;
			nrbufs = pipe->nrbufs;
			while (--nrbufs >= 0) {
				count += pipe->bufs[buf].len;
				buf = (buf+1) & (pipe->buffers - 1);
			}
			__pipe_unlock(pipe);

			return put_user(count, (int __user *)arg);
		default:
			return -ENOIOCTLCMD;
	}
}
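
/*
 * Illustrative sketch (not in the original file): FIONREAD is how
 * userspace asks "how many bytes are currently queued in this pipe?":
 *
 *	int queued;
 *	if (ioctl(fds[0], FIONREAD, &queued) == 0)
 *		printf("%d bytes buffered\n", queued);
 */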
/* No kernel lock held - fine */
static unsigned int
pipe_poll(struct file *filp, poll_table *wait)
{
	unsigned int mask;
	struct pipe_inode_info *pipe = filp->private_data;
	int nrbufs;

	poll_wait(filp, &pipe->wait, wait);

	/* Reading only -- no need for acquiring the semaphore. */
	nrbufs = pipe->nrbufs;
	mask = 0;
	if (filp->f_mode & FMODE_READ) {
		mask = (nrbufs > 0) ? POLLIN | POLLRDNORM : 0;
		if (!pipe->writers && filp->f_version != pipe->w_counter)
			mask |= POLLHUP;
	}

	if (filp->f_mode & FMODE_WRITE) {
		mask |= (nrbufs < pipe->buffers) ? POLLOUT | POLLWRNORM : 0;
		/*
		 * Most Unices do not set POLLERR for FIFOs but on Linux they
		 * behave exactly like pipes for poll().
		 */
		if (!pipe->readers)
			mask |= POLLERR;
	}

	return mask;
}
static void put_pipe_info(struct inode *inode, struct pipe_inode_info *pipe)
{
	int kill = 0;

	spin_lock(&inode->i_lock);
	if (!--pipe->files) {
		inode->i_pipe = NULL;
		kill = 1;
	}
	spin_unlock(&inode->i_lock);

	if (kill)
		free_pipe_info(pipe);
}
static int
pipe_release(struct inode *inode, struct file *file)
{
	struct pipe_inode_info *pipe = file->private_data;

	__pipe_lock(pipe);
	if (file->f_mode & FMODE_READ)
		pipe->readers--;
	if (file->f_mode & FMODE_WRITE)
		pipe->writers--;

	if (pipe->readers || pipe->writers) {
		wake_up_interruptible_sync_poll(&pipe->wait, POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM | POLLERR | POLLHUP);
		kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
		kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
	}
	__pipe_unlock(pipe);

	put_pipe_info(inode, pipe);
	return 0;
}
static int
pipe_fasync(int fd, struct file *filp, int on)
{
	struct pipe_inode_info *pipe = filp->private_data;
	int retval = 0;

	__pipe_lock(pipe);
	if (filp->f_mode & FMODE_READ)
		retval = fasync_helper(fd, filp, on, &pipe->fasync_readers);
	if ((filp->f_mode & FMODE_WRITE) && retval >= 0) {
		retval = fasync_helper(fd, filp, on, &pipe->fasync_writers);
		if (retval < 0 && (filp->f_mode & FMODE_READ))
			/* this can happen only if on == T */
			fasync_helper(-1, filp, 0, &pipe->fasync_readers);
	}
	__pipe_unlock(pipe);
	return retval;
}
struct pipe_inode_info *alloc_pipe_info(void)
{
	struct pipe_inode_info *pipe;

	pipe = kzalloc(sizeof(struct pipe_inode_info), GFP_KERNEL);
	if (pipe) {
		pipe->bufs = kzalloc(sizeof(struct pipe_buffer) * PIPE_DEF_BUFFERS, GFP_KERNEL);
		if (pipe->bufs) {
			init_waitqueue_head(&pipe->wait);
			pipe->r_counter = pipe->w_counter = 1;
			pipe->buffers = PIPE_DEF_BUFFERS;
			mutex_init(&pipe->mutex);
			return pipe;
		}
		kfree(pipe);
	}

	return NULL;
}
void free_pipe_info(struct pipe_inode_info *pipe)
{
	int i;

	for (i = 0; i < pipe->buffers; i++) {
		struct pipe_buffer *buf = pipe->bufs + i;
		if (buf->ops)
			buf->ops->release(pipe, buf);
	}
	if (pipe->tmp_page)
		__free_page(pipe->tmp_page);
	kfree(pipe->bufs);
	kfree(pipe);
}
static struct vfsmount *pipe_mnt __read_mostly;

/*
 * pipefs_dname() is called from d_path().
 */
static char *pipefs_dname(struct dentry *dentry, char *buffer, int buflen)
{
	return dynamic_dname(dentry, buffer, buflen, "pipe:[%lu]",
				dentry->d_inode->i_ino);
}

static const struct dentry_operations pipefs_dentry_operations = {
	.d_dname	= pipefs_dname,
};
static struct inode * get_pipe_inode(void)
{
	struct inode *inode = new_inode_pseudo(pipe_mnt->mnt_sb);
	struct pipe_inode_info *pipe;

	if (!inode)
		goto fail_inode;

	inode->i_ino = get_next_ino();

	pipe = alloc_pipe_info();
	if (!pipe)
		goto fail_iput;

	inode->i_pipe = pipe;
	pipe->files = 1;
	pipe->readers = pipe->writers = 1;
	inode->i_fop = &pipefifo_fops;

	/*
	 * Mark the inode dirty from the very beginning,
	 * that way it will never be moved to the dirty
	 * list because "mark_inode_dirty()" will think
	 * that it already _is_ on the dirty list.
	 */
	inode->i_state = I_DIRTY;
	inode->i_mode = S_IFIFO | S_IRUSR | S_IWUSR;
	inode->i_uid = current_fsuid();
	inode->i_gid = current_fsgid();
	inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;

	return inode;

fail_iput:
	iput(inode);

fail_inode:
	return NULL;
}
int create_pipe_files(struct file **res, int flags)
{
	int err;
	struct inode *inode = get_pipe_inode();
	struct file *f;
	struct path path;
	static struct qstr name = { .name = "" };

	if (!inode)
		return -ENFILE;

	err = -ENOMEM;
	path.dentry = d_alloc_pseudo(pipe_mnt->mnt_sb, &name);
	if (!path.dentry)
		goto err_inode;
	path.mnt = mntget(pipe_mnt);

	d_instantiate(path.dentry, inode);

	err = -ENFILE;
	f = alloc_file(&path, FMODE_WRITE, &pipefifo_fops);
	if (IS_ERR(f))
		goto err_dentry;

	f->f_flags = O_WRONLY | (flags & (O_NONBLOCK | O_DIRECT));
	f->private_data = inode->i_pipe;

	res[0] = alloc_file(&path, FMODE_READ, &pipefifo_fops);
	if (IS_ERR(res[0]))
		goto err_file;

	path_get(&path);
	res[0]->private_data = inode->i_pipe;
	res[0]->f_flags = O_RDONLY | (flags & O_NONBLOCK);
	res[1] = f;
	return 0;

err_file:
	put_filp(f);
err_dentry:
	free_pipe_info(inode->i_pipe);
	path_put(&path);
	return err;

err_inode:
	free_pipe_info(inode->i_pipe);
	iput(inode);
	return err;
}
static int __do_pipe_flags(int *fd, struct file **files, int flags)
{
	int error;
	int fdw, fdr;

	if (flags & ~(O_CLOEXEC | O_NONBLOCK | O_DIRECT))
		return -EINVAL;

	error = create_pipe_files(files, flags);
	if (error)
		return error;

	error = get_unused_fd_flags(flags);
	if (error < 0)
		goto err_read_pipe;
	fdr = error;

	error = get_unused_fd_flags(flags);
	if (error < 0)
		goto err_fdr;
	fdw = error;

	audit_fd_pair(fdr, fdw);
	fd[0] = fdr;
	fd[1] = fdw;
	return 0;

 err_fdr:
	put_unused_fd(fdr);
 err_read_pipe:
	fput(files[0]);
	fput(files[1]);
	return error;
}
int do_pipe_flags(int *fd, int flags)
{
	struct file *files[2];
	int error = __do_pipe_flags(fd, files, flags);
	if (!error) {
		fd_install(fd[0], files[0]);
		fd_install(fd[1], files[1]);
	}
	return error;
}
/*
 * sys_pipe() is the normal C calling standard for creating
 * a pipe. It's not the way Unix traditionally does this, though.
 */
SYSCALL_DEFINE2(pipe2, int __user *, fildes, int, flags)
{
	struct file *files[2];
	int fd[2];
	int error;

	error = __do_pipe_flags(fd, files, flags);
	if (!error) {
		if (unlikely(copy_to_user(fildes, fd, sizeof(fd)))) {
			fput(files[0]);
			fput(files[1]);
			put_unused_fd(fd[0]);
			put_unused_fd(fd[1]);
			error = -EFAULT;
		} else {
			fd_install(fd[0], files[0]);
			fd_install(fd[1], files[1]);
		}
	}
	return error;
}

SYSCALL_DEFINE1(pipe, int __user *, fildes)
{
	return sys_pipe2(fildes, 0);
}
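
/*
 * Illustrative userspace sketch (not part of the original file): the
 * common calling pattern for these syscalls via the glibc wrappers.
 * O_CLOEXEC avoids leaking the descriptors across exec():
 *
 *	#define _GNU_SOURCE
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *
 *	int fds[2];
 *	if (pipe2(fds, O_CLOEXEC | O_NONBLOCK) == 0) {
 *		char buf[2];
 *		write(fds[1], "hi", 2);
 *		read(fds[0], buf, 2);
 *	}
 */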
static int wait_for_partner(struct pipe_inode_info *pipe, unsigned int *cnt)
{
	int cur = *cnt;

	while (cur == *cnt) {
		pipe_wait(pipe);
		if (signal_pending(current))
			break;
	}
	return cur == *cnt ? -ERESTARTSYS : 0;
}

static void wake_up_partner(struct pipe_inode_info *pipe)
{
	wake_up_interruptible(&pipe->wait);
}
static int fifo_open(struct inode *inode, struct file *filp)
{
	struct pipe_inode_info *pipe;
	bool is_pipe = inode->i_sb->s_magic == PIPEFS_MAGIC;
	int ret;

	filp->f_version = 0;

	spin_lock(&inode->i_lock);
	if (inode->i_pipe) {
		pipe = inode->i_pipe;
		pipe->files++;
		spin_unlock(&inode->i_lock);
	} else {
		spin_unlock(&inode->i_lock);
		pipe = alloc_pipe_info();
		if (!pipe)
			return -ENOMEM;
		pipe->files = 1;
		spin_lock(&inode->i_lock);
		if (unlikely(inode->i_pipe)) {
			inode->i_pipe->files++;
			spin_unlock(&inode->i_lock);
			free_pipe_info(pipe);
			pipe = inode->i_pipe;
		} else {
			inode->i_pipe = pipe;
			spin_unlock(&inode->i_lock);
		}
	}
	filp->private_data = pipe;
	/* OK, we have a pipe and it's pinned down */

	__pipe_lock(pipe);

	/* We can only do regular read/write on fifos */
	filp->f_mode &= (FMODE_READ | FMODE_WRITE);

	switch (filp->f_mode) {
	case FMODE_READ:
	/*
	 *  O_RDONLY
	 *  POSIX.1 says that O_NONBLOCK means return with the FIFO
	 *  opened, even when there is no process writing the FIFO.
	 */
		pipe->r_counter++;
		if (pipe->readers++ == 0)
			wake_up_partner(pipe);

		if (!is_pipe && !pipe->writers) {
			if ((filp->f_flags & O_NONBLOCK)) {
				/* suppress POLLHUP until we have
				 * seen a writer */
				filp->f_version = pipe->w_counter;
			} else {
				if (wait_for_partner(pipe, &pipe->w_counter))
					goto err_rd;
			}
		}
		break;

	case FMODE_WRITE:
	/*
	 *  O_WRONLY
	 *  POSIX.1 says that O_NONBLOCK means return -1 with
	 *  errno=ENXIO when there is no process reading the FIFO.
	 */
		ret = -ENXIO;
		if (!is_pipe && (filp->f_flags & O_NONBLOCK) && !pipe->readers)
			goto err;

		pipe->w_counter++;
		if (!pipe->writers++)
			wake_up_partner(pipe);

		if (!is_pipe && !pipe->readers) {
			if (wait_for_partner(pipe, &pipe->r_counter))
				goto err_wr;
		}
		break;

	case FMODE_READ | FMODE_WRITE:
	/*
	 *  O_RDWR
	 *  POSIX.1 leaves this case "undefined" when O_NONBLOCK is set.
	 *  This implementation will NEVER block on a O_RDWR open, since
	 *  the process can at least talk to itself.
	 */
		pipe->readers++;
		pipe->writers++;
		pipe->r_counter++;
		pipe->w_counter++;
		if (pipe->readers == 1 || pipe->writers == 1)
			wake_up_partner(pipe);
		break;

	default:
		ret = -EINVAL;
		goto err;
	}

	/* Ok! */
	__pipe_unlock(pipe);
	return 0;

err_rd:
	if (!--pipe->readers)
		wake_up_interruptible(&pipe->wait);
	ret = -ERESTARTSYS;
	goto err;

err_wr:
	if (!--pipe->writers)
		wake_up_interruptible(&pipe->wait);
	ret = -ERESTARTSYS;
	goto err;

err:
	__pipe_unlock(pipe);

	put_pipe_info(inode, pipe);
	return ret;
}
const struct file_operations pipefifo_fops = {
	.open		= fifo_open,
	.llseek		= no_llseek,
	.read		= new_sync_read,
	.read_iter	= pipe_read,
	.write		= do_sync_write,
	.aio_write	= pipe_write,
	.poll		= pipe_poll,
	.unlocked_ioctl	= pipe_ioctl,
	.release	= pipe_release,
	.fasync		= pipe_fasync,
};
/*
 * Allocate a new array of pipe buffers and copy the info over. Returns the
 * pipe size if successful, or return -ERROR on error.
 */
static long pipe_set_size(struct pipe_inode_info *pipe, unsigned long nr_pages)
{
	struct pipe_buffer *bufs;

	/*
	 * We can shrink the pipe, if arg >= pipe->nrbufs. Since we don't
	 * expect a lot of shrink+grow operations, just free and allocate
	 * again like we would do for growing. If the pipe currently
	 * contains more buffers than arg, then return busy.
	 */
	if (nr_pages < pipe->nrbufs)
		return -EBUSY;

	bufs = kcalloc(nr_pages, sizeof(*bufs), GFP_KERNEL | __GFP_NOWARN);
	if (unlikely(!bufs))
		return -ENOMEM;

	/*
	 * The pipe array wraps around, so just start the new one at zero
	 * and adjust the indexes.
	 */
	if (pipe->nrbufs) {
		unsigned int tail;
		unsigned int head;

		tail = pipe->curbuf + pipe->nrbufs;
		if (tail < pipe->buffers)
			tail = 0;
		else
			tail &= (pipe->buffers - 1);

		head = pipe->nrbufs - tail;
		if (head)
			memcpy(bufs, pipe->bufs + pipe->curbuf, head * sizeof(struct pipe_buffer));
		if (tail)
			memcpy(bufs + head, pipe->bufs, tail * sizeof(struct pipe_buffer));
	}

	pipe->curbuf = 0;
	kfree(pipe->bufs);
	pipe->bufs = bufs;
	pipe->buffers = nr_pages;
	return nr_pages * PAGE_SIZE;
}
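
/*
 * Worked example (illustrative, not in the original file): with
 * pipe->buffers = 8, curbuf = 6 and nrbufs = 4, the live buffers occupy
 * slots 6, 7, 0, 1. Then tail = (6 + 4) & 7 = 2 and head = 4 - 2 = 2, so
 * the first memcpy moves slots 6..7 and the second moves slots 0..1,
 * leaving the new array linear with curbuf = 0.
 */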
/*
 * Currently we rely on the pipe array holding a power-of-2 number
 * of pages.
 */
static inline unsigned int round_pipe_size(unsigned int size)
{
	unsigned long nr_pages;

	nr_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	return roundup_pow_of_two(nr_pages) << PAGE_SHIFT;
}
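
/*
 * Worked example (illustrative, not in the original file): assuming
 * 4 KiB pages, round_pipe_size(100000) computes nr_pages = 25, rounds
 * up to the next power of two (32), and returns 32 << PAGE_SHIFT =
 * 131072 bytes.
 */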
/*
 * This should work even if CONFIG_PROC_FS isn't set, as proc_dointvec_minmax
 * will return an error.
 */
int pipe_proc_fn(struct ctl_table *table, int write, void __user *buf,
		 size_t *lenp, loff_t *ppos)
{
	int ret;

	ret = proc_dointvec_minmax(table, write, buf, lenp, ppos);
	if (ret < 0 || !write)
		return ret;

	pipe_max_size = round_pipe_size(pipe_max_size);

	return ret;
}
/*
 * After the inode slimming patch, i_pipe/i_bdev/i_cdev share the same
 * location, so checking ->i_pipe is not enough to verify that this is a
 * pipe.
 */
struct pipe_inode_info *get_pipe_info(struct file *file)
{
	return file->f_op == &pipefifo_fops ? file->private_data : NULL;
}
long pipe_fcntl(struct file *file, unsigned int cmd, unsigned long arg)
{
	struct pipe_inode_info *pipe;
	long ret;

	pipe = get_pipe_info(file);
	if (!pipe)
		return -EBADF;

	__pipe_lock(pipe);

	switch (cmd) {
	case F_SETPIPE_SZ: {
		unsigned int size, nr_pages;

		size = round_pipe_size(arg);
		nr_pages = size >> PAGE_SHIFT;

		ret = -EINVAL;
		if (!nr_pages)
			goto out;

		if (!capable(CAP_SYS_RESOURCE) && size > pipe_max_size) {
			ret = -EPERM;
			goto out;
		}
		ret = pipe_set_size(pipe, nr_pages);
		break;
		}
	case F_GETPIPE_SZ:
		ret = pipe->buffers * PAGE_SIZE;
		break;
	default:
		ret = -EINVAL;
		break;
	}

out:
	__pipe_unlock(pipe);
	return ret;
}
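
/*
 * Illustrative userspace sketch (not part of the original file): resizing
 * a pipe through fcntl(). The kernel rounds the request up via
 * round_pipe_size(), so F_GETPIPE_SZ may report more than was asked for:
 *
 *	fcntl(fds[1], F_SETPIPE_SZ, 100000);	// rounded up to 131072
 *	long sz = fcntl(fds[1], F_GETPIPE_SZ);	// returns 131072
 */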
static const struct super_operations pipefs_ops = {
	.destroy_inode = free_inode_nonrcu,
	.statfs = simple_statfs,
};
/*
 * pipefs should _never_ be mounted by userland - too much of security hassle,
 * no real gain from having the whole whorehouse mounted. So we don't need
 * any operations on the root directory. However, we need a non-trivial
 * d_name - pipe: will go nicely and kill the special-casing in procfs.
 */
static struct dentry *pipefs_mount(struct file_system_type *fs_type,
			 int flags, const char *dev_name, void *data)
{
	return mount_pseudo(fs_type, "pipe:", &pipefs_ops,
			&pipefs_dentry_operations, PIPEFS_MAGIC);
}

static struct file_system_type pipe_fs_type = {
	.name		= "pipefs",
	.mount		= pipefs_mount,
	.kill_sb	= kill_anon_super,
};
static int __init init_pipe_fs(void)
{
	int err = register_filesystem(&pipe_fs_type);

	if (!err) {
		pipe_mnt = kern_mount(&pipe_fs_type);
		if (IS_ERR(pipe_mnt)) {
			err = PTR_ERR(pipe_mnt);
			unregister_filesystem(&pipe_fs_type);
		}
	}
	return err;
}

fs_initcall(init_pipe_fs);