 * Copyright (C) 1991, 1992, 1999 Linus Torvalds
#include <linux/file.h>
#include <linux/poll.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/log2.h>
#include <linux/mount.h>
#include <linux/magic.h>
#include <linux/pipe_fs_i.h>
#include <linux/uio.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/audit.h>
#include <linux/syscalls.h>
#include <linux/fcntl.h>
#include <linux/aio.h>
#include <asm/uaccess.h>
#include <asm/ioctls.h>
 * The max size that a non-root user is allowed to grow the pipe. Can
 * be set by root in /proc/sys/fs/pipe-max-size
unsigned int pipe_max_size = 1048576;
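/*
 * The cap is enforced in pipe_fcntl() below: an unprivileged F_SETPIPE_SZ
 * request above pipe_max_size fails with -EPERM unless the caller has
 * CAP_SYS_RESOURCE.  Root can raise the limit at runtime, for example:
 *
 *	echo 4194304 > /proc/sys/fs/pipe-max-size
 */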
 * Minimum pipe size, as required by POSIX
unsigned int pipe_min_size = PAGE_SIZE;
 * We use a start+len construction, which provides full use of the
 * -- Florian Coosmann (FGC)
 * Reads with count = 0 should always return 0.
 * -- Julian Bradfield 1999-06-07.
 * FIFOs and Pipes now generate SIGIO for both readers and writers.
 * -- Jeremy Elson <jelson@circlemud.org> 2001-08-16
 * pipe_read & write cleanup
 * -- Manfred Spraul <manfred@colorfullife.com> 2002-05-09
static void pipe_lock_nested(struct pipe_inode_info *pipe, int subclass)
mutex_lock_nested(&pipe->mutex, subclass);
void pipe_lock(struct pipe_inode_info *pipe)
 * pipe_lock() nests non-pipe inode locks (for writing to a file)
pipe_lock_nested(pipe, I_MUTEX_PARENT);
EXPORT_SYMBOL(pipe_lock);
void pipe_unlock(struct pipe_inode_info *pipe)
mutex_unlock(&pipe->mutex);
EXPORT_SYMBOL(pipe_unlock);
static inline void __pipe_lock(struct pipe_inode_info *pipe)
mutex_lock_nested(&pipe->mutex, I_MUTEX_PARENT);
static inline void __pipe_unlock(struct pipe_inode_info *pipe)
mutex_unlock(&pipe->mutex);
void pipe_double_lock(struct pipe_inode_info *pipe1,
struct pipe_inode_info *pipe2)
BUG_ON(pipe1 == pipe2);
pipe_lock_nested(pipe1, I_MUTEX_PARENT);
pipe_lock_nested(pipe2, I_MUTEX_CHILD);
pipe_lock_nested(pipe2, I_MUTEX_PARENT);
pipe_lock_nested(pipe1, I_MUTEX_CHILD);
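/*
 * Both locks are always taken in the same global order (chosen by
 * comparing the two pipe pointers) and are annotated with distinct
 * lockdep subclasses, so two tasks that each need to hold a pair of
 * pipes at once (as tee() and splice() between pipes do) cannot
 * deadlock against one another.
 */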
/* Drop the inode semaphore and wait for a pipe event, atomically */
void pipe_wait(struct pipe_inode_info *pipe)
 * Pipes are system-local resources, so sleeping on them
 * is considered a noninteractive wait:
prepare_to_wait(&pipe->wait, &wait, TASK_INTERRUPTIBLE);
finish_wait(&pipe->wait, &wait);
pipe_iov_copy_from_user(void *to, struct iovec *iov, unsigned long len,
while (!iov->iov_len)
copy = min_t(unsigned long, len, iov->iov_len);
if (__copy_from_user_inatomic(to, iov->iov_base, copy))
if (copy_from_user(to, iov->iov_base, copy))
iov->iov_base += copy;
iov->iov_len -= copy;
pipe_iov_copy_to_user(struct iovec *iov, const void *from, unsigned long len,
while (!iov->iov_len)
copy = min_t(unsigned long, len, iov->iov_len);
if (__copy_to_user_inatomic(iov->iov_base, from, copy))
if (copy_to_user(iov->iov_base, from, copy))
iov->iov_base += copy;
iov->iov_len -= copy;
 * Attempt to pre-fault in the user memory, so we can use atomic copies.
 * Returns the number of bytes not faulted in.
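/*
 * The fast paths in pipe_read() and pipe_write() copy user data under
 * kmap_atomic(), where a page fault cannot be serviced.  Touching the
 * user pages beforehand makes the __copy_*_inatomic() calls likely to
 * succeed; if they still fail, the callers retry via the sleeping
 * kmap()/copy_*_user() slow path.
 */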
static int iov_fault_in_pages_write(struct iovec *iov, unsigned long len)
while (!iov->iov_len)
unsigned long this_len;
this_len = min_t(unsigned long, len, iov->iov_len);
if (fault_in_pages_writeable(iov->iov_base, this_len))
 * Pre-fault in the user memory, so we can use atomic copies.
static void iov_fault_in_pages_read(struct iovec *iov, unsigned long len)
while (!iov->iov_len)
unsigned long this_len;
this_len = min_t(unsigned long, len, iov->iov_len);
fault_in_pages_readable(iov->iov_base, this_len);
static void anon_pipe_buf_release(struct pipe_inode_info *pipe,
struct pipe_buffer *buf)
struct page *page = buf->page;
 * If nobody else uses this page, and we don't already have a
 * temporary page, let's keep track of it as a one-deep
 * allocation cache. (Otherwise just release our reference to it)
if (page_count(page) == 1 && !pipe->tmp_page)
pipe->tmp_page = page;
page_cache_release(page);
 * generic_pipe_buf_steal - attempt to take ownership of a &pipe_buffer
 * @pipe: the pipe that the buffer belongs to
 * @buf: the buffer to attempt to steal
 * This function attempts to steal the &struct page attached to
 * @buf. If successful, this function returns 0 and returns with
 * the page locked. The caller may then reuse the page for whatever
 * he wishes; the typical use is insertion into a different file
int generic_pipe_buf_steal(struct pipe_inode_info *pipe,
struct pipe_buffer *buf)
struct page *page = buf->page;
 * A reference of one is golden, that means that the owner of this
 * page is the only one holding a reference to it. lock the page
if (page_count(page) == 1) {
EXPORT_SYMBOL(generic_pipe_buf_steal);
 * generic_pipe_buf_get - get a reference to a &struct pipe_buffer
 * @pipe: the pipe that the buffer belongs to
 * @buf: the buffer to get a reference to
 * This function grabs an extra reference to @buf. It's used in
 * the tee() system call, when we duplicate the buffers in one
void generic_pipe_buf_get(struct pipe_inode_info *pipe, struct pipe_buffer *buf)
page_cache_get(buf->page);
EXPORT_SYMBOL(generic_pipe_buf_get);
 * generic_pipe_buf_confirm - verify contents of the pipe buffer
 * @info: the pipe that the buffer belongs to
 * @buf: the buffer to confirm
 * This function does nothing, because the generic pipe code uses
 * pages that are always good when inserted into the pipe.
int generic_pipe_buf_confirm(struct pipe_inode_info *info,
struct pipe_buffer *buf)
EXPORT_SYMBOL(generic_pipe_buf_confirm);
 * generic_pipe_buf_release - put a reference to a &struct pipe_buffer
 * @pipe: the pipe that the buffer belongs to
 * @buf: the buffer to put a reference to
 * This function releases a reference to @buf.
void generic_pipe_buf_release(struct pipe_inode_info *pipe,
struct pipe_buffer *buf)
page_cache_release(buf->page);
EXPORT_SYMBOL(generic_pipe_buf_release);
static const struct pipe_buf_operations anon_pipe_buf_ops = {
.confirm = generic_pipe_buf_confirm,
.release = anon_pipe_buf_release,
.steal = generic_pipe_buf_steal,
.get = generic_pipe_buf_get,
static const struct pipe_buf_operations packet_pipe_buf_ops = {
.confirm = generic_pipe_buf_confirm,
.release = anon_pipe_buf_release,
.steal = generic_pipe_buf_steal,
.get = generic_pipe_buf_get,
pipe_read(struct kiocb *iocb, const struct iovec *_iov,
unsigned long nr_segs, loff_t pos)
struct file *filp = iocb->ki_filp;
struct pipe_inode_info *pipe = filp->private_data;
struct iovec *iov = (struct iovec *)_iov;
total_len = iov_length(iov, nr_segs);
/* Null read succeeds. */
if (unlikely(total_len == 0))
int bufs = pipe->nrbufs;
int curbuf = pipe->curbuf;
struct pipe_buffer *buf = pipe->bufs + curbuf;
const struct pipe_buf_operations *ops = buf->ops;
size_t chars = buf->len;
if (chars > total_len)
error = ops->confirm(pipe, buf);
atomic = !iov_fault_in_pages_write(iov, chars);
addr = kmap_atomic(buf->page);
addr = kmap(buf->page);
error = pipe_iov_copy_to_user(iov, addr + buf->offset, chars, atomic);
if (unlikely(error)) {
 * Just retry with the slow path if we failed.
buf->offset += chars;
/* Was it a packet buffer? Clean up and exit */
if (buf->flags & PIPE_BUF_FLAG_PACKET) {
ops->release(pipe, buf);
curbuf = (curbuf + 1) & (pipe->buffers - 1);
pipe->curbuf = curbuf;
pipe->nrbufs = --bufs;
break; /* common path: read succeeded */
if (bufs) /* More to do? */
if (!pipe->waiting_writers) {
/* syscall merging: Usually we must not sleep
 * if O_NONBLOCK is set, or if we got some data.
 * But if a writer sleeps in kernel space, then
 * we can wait for that data without violating POSIX.
if (filp->f_flags & O_NONBLOCK) {
if (signal_pending(current)) {
wake_up_interruptible_sync_poll(&pipe->wait, POLLOUT | POLLWRNORM);
kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
/* Signal writers asynchronously that there is more room. */
wake_up_interruptible_sync_poll(&pipe->wait, POLLOUT | POLLWRNORM);
kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
static inline int is_packetized(struct file *file)
return (file->f_flags & O_DIRECT) != 0;
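/*
 * A pipe opened with O_DIRECT operates in "packet" mode: writes are
 * never merged into a previous buffer, each buffer is flagged
 * PIPE_BUF_FLAG_PACKET, and pipe_read() above returns at most one
 * packet per read, dropping whatever part of it the reader did not
 * consume.
 */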
pipe_write(struct kiocb *iocb, const struct iovec *_iov,
unsigned long nr_segs, loff_t ppos)
struct file *filp = iocb->ki_filp;
struct pipe_inode_info *pipe = filp->private_data;
struct iovec *iov = (struct iovec *)_iov;
total_len = iov_length(iov, nr_segs);
/* Null write succeeds. */
if (unlikely(total_len == 0))
if (!pipe->readers) {
send_sig(SIGPIPE, current, 0);
/* We try to merge small writes */
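/*
 * Merging: if the trailing partial page of this write still fits in the
 * free space at the end of the most recently filled buffer's page, it is
 * appended there instead of consuming a fresh buffer, so e.g. two
 * back-to-back 10-byte writes normally share one page.
 */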
chars = total_len & (PAGE_SIZE-1); /* size of the last buffer */
if (pipe->nrbufs && chars != 0) {
int lastbuf = (pipe->curbuf + pipe->nrbufs - 1) &
struct pipe_buffer *buf = pipe->bufs + lastbuf;
const struct pipe_buf_operations *ops = buf->ops;
int offset = buf->offset + buf->len;
if (ops->can_merge && offset + chars <= PAGE_SIZE) {
int error, atomic = 1;
error = ops->confirm(pipe, buf);
iov_fault_in_pages_read(iov, chars);
addr = kmap_atomic(buf->page);
addr = kmap(buf->page);
error = pipe_iov_copy_from_user(offset + addr, iov,
if (!pipe->readers) {
send_sig(SIGPIPE, current, 0);
if (bufs < pipe->buffers) {
int newbuf = (pipe->curbuf + bufs) & (pipe->buffers-1);
struct pipe_buffer *buf = pipe->bufs + newbuf;
struct page *page = pipe->tmp_page;
int error, atomic = 1;
page = alloc_page(GFP_HIGHUSER);
if (unlikely(!page)) {
ret = ret ? : -ENOMEM;
pipe->tmp_page = page;
/* Always wake up, even if the copy fails. Otherwise
 * we lock up (O_NONBLOCK-)readers that sleep due to
 * FIXME! Is this really true?
if (chars > total_len)
iov_fault_in_pages_read(iov, chars);
src = kmap_atomic(page);
error = pipe_iov_copy_from_user(src, iov, chars,
if (unlikely(error)) {
/* Insert it into the buffer array */
buf->ops = &anon_pipe_buf_ops;
if (is_packetized(filp)) {
buf->ops = &packet_pipe_buf_ops;
buf->flags = PIPE_BUF_FLAG_PACKET;
pipe->nrbufs = ++bufs;
pipe->tmp_page = NULL;
if (bufs < pipe->buffers)
if (filp->f_flags & O_NONBLOCK) {
if (signal_pending(current)) {
wake_up_interruptible_sync_poll(&pipe->wait, POLLIN | POLLRDNORM);
kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
pipe->waiting_writers++;
pipe->waiting_writers--;
wake_up_interruptible_sync_poll(&pipe->wait, POLLIN | POLLRDNORM);
kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
if (ret > 0 && sb_start_write_trylock(file_inode(filp)->i_sb)) {
int err = file_update_time(filp);
sb_end_write(file_inode(filp)->i_sb);
static long pipe_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
struct pipe_inode_info *pipe = filp->private_data;
int count, buf, nrbufs;
nrbufs = pipe->nrbufs;
while (--nrbufs >= 0) {
count += pipe->bufs[buf].len;
buf = (buf+1) & (pipe->buffers - 1);
return put_user(count, (int __user *)arg);
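/*
 * This is the FIONREAD case: it reports how many bytes are currently
 * queued in the pipe by summing the length of every occupied buffer.
 * From user space:
 *
 *	int n;
 *	ioctl(pipefd, FIONREAD, &n);
 */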
/* No kernel lock held - fine */
pipe_poll(struct file *filp, poll_table *wait)
struct pipe_inode_info *pipe = filp->private_data;
poll_wait(filp, &pipe->wait, wait);
/* Reading only -- no need for acquiring the semaphore. */
nrbufs = pipe->nrbufs;
if (filp->f_mode & FMODE_READ) {
mask = (nrbufs > 0) ? POLLIN | POLLRDNORM : 0;
if (!pipe->writers && filp->f_version != pipe->w_counter)
if (filp->f_mode & FMODE_WRITE) {
mask |= (nrbufs < pipe->buffers) ? POLLOUT | POLLWRNORM : 0;
 * Most Unices do not set POLLERR for FIFOs but on Linux they
 * behave exactly like pipes for poll().
static void put_pipe_info(struct inode *inode, struct pipe_inode_info *pipe)
spin_lock(&inode->i_lock);
if (!--pipe->files) {
inode->i_pipe = NULL;
spin_unlock(&inode->i_lock);
free_pipe_info(pipe);
pipe_release(struct inode *inode, struct file *file)
struct pipe_inode_info *pipe = file->private_data;
if (file->f_mode & FMODE_READ)
if (file->f_mode & FMODE_WRITE)
if (pipe->readers || pipe->writers) {
wake_up_interruptible_sync_poll(&pipe->wait, POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM | POLLERR | POLLHUP);
kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
put_pipe_info(inode, pipe);
pipe_fasync(int fd, struct file *filp, int on)
struct pipe_inode_info *pipe = filp->private_data;
if (filp->f_mode & FMODE_READ)
retval = fasync_helper(fd, filp, on, &pipe->fasync_readers);
if ((filp->f_mode & FMODE_WRITE) && retval >= 0) {
retval = fasync_helper(fd, filp, on, &pipe->fasync_writers);
if (retval < 0 && (filp->f_mode & FMODE_READ))
/* this can happen only if on == T */
fasync_helper(-1, filp, 0, &pipe->fasync_readers);
struct pipe_inode_info *alloc_pipe_info(void)
struct pipe_inode_info *pipe;
pipe = kzalloc(sizeof(struct pipe_inode_info), GFP_KERNEL);
pipe->bufs = kzalloc(sizeof(struct pipe_buffer) * PIPE_DEF_BUFFERS, GFP_KERNEL);
init_waitqueue_head(&pipe->wait);
pipe->r_counter = pipe->w_counter = 1;
pipe->buffers = PIPE_DEF_BUFFERS;
mutex_init(&pipe->mutex);
void free_pipe_info(struct pipe_inode_info *pipe)
for (i = 0; i < pipe->buffers; i++) {
struct pipe_buffer *buf = pipe->bufs + i;
buf->ops->release(pipe, buf);
__free_page(pipe->tmp_page);
static struct vfsmount *pipe_mnt __read_mostly;
 * pipefs_dname() is called from d_path().
static char *pipefs_dname(struct dentry *dentry, char *buffer, int buflen)
return dynamic_dname(dentry, buffer, buflen, "pipe:[%lu]",
dentry->d_inode->i_ino);
static const struct dentry_operations pipefs_dentry_operations = {
.d_dname = pipefs_dname,
static struct inode * get_pipe_inode(void)
struct inode *inode = new_inode_pseudo(pipe_mnt->mnt_sb);
struct pipe_inode_info *pipe;
inode->i_ino = get_next_ino();
pipe = alloc_pipe_info();
inode->i_pipe = pipe;
pipe->readers = pipe->writers = 1;
inode->i_fop = &pipefifo_fops;
 * Mark the inode dirty from the very beginning,
 * that way it will never be moved to the dirty
 * list because "mark_inode_dirty()" will think
 * that it already _is_ on the dirty list.
inode->i_state = I_DIRTY;
inode->i_mode = S_IFIFO | S_IRUSR | S_IWUSR;
inode->i_uid = current_fsuid();
inode->i_gid = current_fsgid();
inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
int create_pipe_files(struct file **res, int flags)
struct inode *inode = get_pipe_inode();
static struct qstr name = { .name = "" };
path.dentry = d_alloc_pseudo(pipe_mnt->mnt_sb, &name);
path.mnt = mntget(pipe_mnt);
d_instantiate(path.dentry, inode);
f = alloc_file(&path, FMODE_WRITE, &pipefifo_fops);
f->f_flags = O_WRONLY | (flags & (O_NONBLOCK | O_DIRECT));
f->private_data = inode->i_pipe;
res[0] = alloc_file(&path, FMODE_READ, &pipefifo_fops);
res[0]->private_data = inode->i_pipe;
res[0]->f_flags = O_RDONLY | (flags & O_NONBLOCK);
free_pipe_info(inode->i_pipe);
free_pipe_info(inode->i_pipe);
static int __do_pipe_flags(int *fd, struct file **files, int flags)
if (flags & ~(O_CLOEXEC | O_NONBLOCK | O_DIRECT))
error = create_pipe_files(files, flags);
error = get_unused_fd_flags(flags);
error = get_unused_fd_flags(flags);
audit_fd_pair(fdr, fdw);
int do_pipe_flags(int *fd, int flags)
struct file *files[2];
int error = __do_pipe_flags(fd, files, flags);
fd_install(fd[0], files[0]);
fd_install(fd[1], files[1]);
 * sys_pipe() is the normal C calling standard for creating
 * a pipe. It's not the way Unix traditionally does this, though.
SYSCALL_DEFINE2(pipe2, int __user *, fildes, int, flags)
struct file *files[2];
error = __do_pipe_flags(fd, files, flags);
if (unlikely(copy_to_user(fildes, fd, sizeof(fd)))) {
put_unused_fd(fd[0]);
put_unused_fd(fd[1]);
fd_install(fd[0], files[0]);
fd_install(fd[1], files[1]);
SYSCALL_DEFINE1(pipe, int __user *, fildes)
return sys_pipe2(fildes, 0);
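/*
 * From user space the two entry points look like:
 *
 *	int fds[2];
 *	pipe(fds);
 *	pipe2(fds, O_CLOEXEC | O_NONBLOCK);
 *
 * where fds[0] is the read end and fds[1] the write end.
 */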
static int wait_for_partner(struct pipe_inode_info *pipe, unsigned int *cnt)
while (cur == *cnt) {
if (signal_pending(current))
return cur == *cnt ? -ERESTARTSYS : 0;
static void wake_up_partner(struct pipe_inode_info *pipe)
wake_up_interruptible(&pipe->wait);
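/*
 * r_counter and w_counter only ever increase (fifo_open() bumps them on
 * every reader/writer open), so wait_for_partner() simply sleeps until
 * the counter it was handed changes, i.e. until a partner of the other
 * kind opens the FIFO or a signal arrives.
 */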
static int fifo_open(struct inode *inode, struct file *filp)
struct pipe_inode_info *pipe;
bool is_pipe = inode->i_sb->s_magic == PIPEFS_MAGIC;
spin_lock(&inode->i_lock);
pipe = inode->i_pipe;
spin_unlock(&inode->i_lock);
spin_unlock(&inode->i_lock);
pipe = alloc_pipe_info();
spin_lock(&inode->i_lock);
if (unlikely(inode->i_pipe)) {
inode->i_pipe->files++;
spin_unlock(&inode->i_lock);
free_pipe_info(pipe);
pipe = inode->i_pipe;
inode->i_pipe = pipe;
spin_unlock(&inode->i_lock);
filp->private_data = pipe;
/* OK, we have a pipe and it's pinned down */
/* We can only do regular read/write on fifos */
filp->f_mode &= (FMODE_READ | FMODE_WRITE);
switch (filp->f_mode) {
 * POSIX.1 says that O_NONBLOCK means return with the FIFO
 * opened, even when there is no process writing the FIFO.
if (pipe->readers++ == 0)
wake_up_partner(pipe);
if (!is_pipe && !pipe->writers) {
if ((filp->f_flags & O_NONBLOCK)) {
/* suppress POLLHUP until we have
filp->f_version = pipe->w_counter;
if (wait_for_partner(pipe, &pipe->w_counter))
 * POSIX.1 says that O_NONBLOCK means return -1 with
 * errno=ENXIO when there is no process reading the FIFO.
if (!is_pipe && (filp->f_flags & O_NONBLOCK) && !pipe->readers)
if (!pipe->writers++)
wake_up_partner(pipe);
if (!is_pipe && !pipe->readers) {
if (wait_for_partner(pipe, &pipe->r_counter))
case FMODE_READ | FMODE_WRITE:
 * POSIX.1 leaves this case "undefined" when O_NONBLOCK is set.
 * This implementation will NEVER block on an O_RDWR open, since
 * the process can at least talk to itself.
if (pipe->readers == 1 || pipe->writers == 1)
wake_up_partner(pipe);
__pipe_unlock(pipe);
if (!--pipe->readers)
wake_up_interruptible(&pipe->wait);
if (!--pipe->writers)
wake_up_interruptible(&pipe->wait);
__pipe_unlock(pipe);
put_pipe_info(inode, pipe);
const struct file_operations pipefifo_fops = {
.llseek = no_llseek,
.read = do_sync_read,
.aio_read = pipe_read,
.write = do_sync_write,
.aio_write = pipe_write,
.unlocked_ioctl = pipe_ioctl,
.release = pipe_release,
.fasync = pipe_fasync,
 * Allocate a new array of pipe buffers and copy the info over. Returns the
 * pipe size if successful, or -ERROR on error.
static long pipe_set_size(struct pipe_inode_info *pipe, unsigned long nr_pages)
struct pipe_buffer *bufs;
 * We can shrink the pipe, if arg >= pipe->nrbufs. Since we don't
 * expect a lot of shrink+grow operations, just free and allocate
 * again like we would do for growing. If the pipe currently
 * contains more buffers than arg, then return busy.
if (nr_pages < pipe->nrbufs)
bufs = kcalloc(nr_pages, sizeof(*bufs), GFP_KERNEL | __GFP_NOWARN);
if (unlikely(!bufs))
 * The pipe array wraps around, so just start the new one at zero
 * and adjust the indexes.
tail = pipe->curbuf + pipe->nrbufs;
if (tail < pipe->buffers)
tail &= (pipe->buffers - 1);
head = pipe->nrbufs - tail;
memcpy(bufs, pipe->bufs + pipe->curbuf, head * sizeof(struct pipe_buffer));
memcpy(bufs + head, pipe->bufs, tail * sizeof(struct pipe_buffer));
pipe->buffers = nr_pages;
return nr_pages * PAGE_SIZE;
 * Currently we rely on the pipe array holding a power-of-2 number
static inline unsigned int round_pipe_size(unsigned int size)
unsigned long nr_pages;
nr_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
return roundup_pow_of_two(nr_pages) << PAGE_SHIFT;
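/*
 * For example, with 4 KiB pages a request of 20000 bytes covers five
 * pages, which rounds up to the next power of two (eight), giving a
 * 32768-byte pipe; a request of 5000 bytes becomes two pages, i.e.
 * 8192 bytes.
 */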
 * This should work even if CONFIG_PROC_FS isn't set, as proc_dointvec_minmax
 * will return an error.
int pipe_proc_fn(struct ctl_table *table, int write, void __user *buf,
size_t *lenp, loff_t *ppos)
ret = proc_dointvec_minmax(table, write, buf, lenp, ppos);
if (ret < 0 || !write)
pipe_max_size = round_pipe_size(pipe_max_size);
 * After the inode slimming patch, i_pipe/i_bdev/i_cdev share the same
 * location, so checking ->i_pipe is not enough to verify that this is a
struct pipe_inode_info *get_pipe_info(struct file *file)
return file->f_op == &pipefifo_fops ? file->private_data : NULL;
long pipe_fcntl(struct file *file, unsigned int cmd, unsigned long arg)
struct pipe_inode_info *pipe;
pipe = get_pipe_info(file);
case F_SETPIPE_SZ: {
unsigned int size, nr_pages;
size = round_pipe_size(arg);
nr_pages = size >> PAGE_SHIFT;
if (!capable(CAP_SYS_RESOURCE) && size > pipe_max_size) {
ret = pipe_set_size(pipe, nr_pages);
ret = pipe->buffers * PAGE_SIZE;
__pipe_unlock(pipe);
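/*
 * Typical use from user space, assuming pipefd refers to a pipe:
 *
 *	fcntl(pipefd, F_SETPIPE_SZ, 1048576);
 *	long sz = fcntl(pipefd, F_GETPIPE_SZ);
 *
 * F_SETPIPE_SZ returns the size actually allocated, which may exceed the
 * request because of the power-of-two rounding above.
 */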
static const struct super_operations pipefs_ops = {
.destroy_inode = free_inode_nonrcu,
.statfs = simple_statfs,
 * pipefs should _never_ be mounted by userland - too much of security hassle,
 * no real gain from having the whole whorehouse mounted. So we don't need
 * any operations on the root directory. However, we need a non-trivial
 * d_name - pipe: will go nicely and kill the special-casing in procfs.
static struct dentry *pipefs_mount(struct file_system_type *fs_type,
int flags, const char *dev_name, void *data)
return mount_pseudo(fs_type, "pipe:", &pipefs_ops,
&pipefs_dentry_operations, PIPEFS_MAGIC);
static struct file_system_type pipe_fs_type = {
.mount = pipefs_mount,
.kill_sb = kill_anon_super,
static int __init init_pipe_fs(void)
int err = register_filesystem(&pipe_fs_type);
pipe_mnt = kern_mount(&pipe_fs_type);
if (IS_ERR(pipe_mnt)) {
err = PTR_ERR(pipe_mnt);
unregister_filesystem(&pipe_fs_type);
fs_initcall(init_pipe_fs);