splice: fix repeated kmap()'s in default_file_splice_read()
[linux-2.6-block.git] / fs / splice.c
5274f052
JA
1/*
2 * "splice": joining two ropes together by interweaving their strands.
3 *
4 * This is the "extended pipe" functionality, where a pipe is used as
5 * an arbitrary in-memory buffer. Think of a pipe as a small kernel
6 * buffer that you can use to transfer data from one end to the other.
7 *
8 * The traditional unix read/write is extended with a "splice()" operation
9 * that transfers data buffers to or from a pipe buffer.
10 *
11 * Named by Larry McVoy, original implementation from Linus, extended by
c2058e06
JA
12 * Jens to support splicing to files, network, direct splicing, etc and
13 * fixing lots of bugs.
5274f052 14 *
0fe23479 15 * Copyright (C) 2005-2006 Jens Axboe <axboe@kernel.dk>
c2058e06
JA
16 * Copyright (C) 2005-2006 Linus Torvalds <torvalds@osdl.org>
17 * Copyright (C) 2006 Ingo Molnar <mingo@elte.hu>
5274f052
JA
18 *
19 */
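/*
 * A minimal user-space sketch (not part of this file) of the model described
 * above: the pipe acts as the in-kernel buffer between a file and a socket,
 * with two splice() calls moving page references instead of copying data.
 * The helper name and descriptors (file_to_socket, file_fd, sock_fd) are
 * hypothetical; error/cleanup handling is trimmed for brevity.
 *
 *	#define _GNU_SOURCE
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *
 *	static int file_to_socket(int file_fd, int sock_fd, size_t len)
 *	{
 *		int pfd[2], ret = 0;
 *
 *		if (pipe(pfd) < 0)
 *			return -1;
 *		while (len && !ret) {
 *			// file -> pipe: queue up to 'len' bytes as pipe buffers
 *			ssize_t n = splice(file_fd, NULL, pfd[1], NULL, len,
 *					   SPLICE_F_MOVE | SPLICE_F_MORE);
 *			if (n <= 0) {
 *				ret = n < 0 ? -1 : 0;
 *				break;
 *			}
 *			len -= n;
 *			// pipe -> socket: drain what was just spliced in
 *			while (n > 0) {
 *				ssize_t m = splice(pfd[0], NULL, sock_fd, NULL,
 *						   n, SPLICE_F_MOVE);
 *				if (m <= 0) {
 *					ret = -1;
 *					break;
 *				}
 *				n -= m;
 *			}
 *		}
 *		close(pfd[0]);
 *		close(pfd[1]);
 *		return ret;
 *	}
 */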
20#include <linux/fs.h>
21#include <linux/file.h>
22#include <linux/pagemap.h>
d6b29d7c 23#include <linux/splice.h>
08e552c6 24#include <linux/memcontrol.h>
5274f052 25#include <linux/mm_inline.h>
5abc97aa 26#include <linux/swap.h>
4f6f0bd2
JA
27#include <linux/writeback.h>
28#include <linux/buffer_head.h>
a0f06780 29#include <linux/module.h>
4f6f0bd2 30#include <linux/syscalls.h>
912d35f8 31#include <linux/uio.h>
29ce2058 32#include <linux/security.h>
5274f052 33
83f9135b
JA
34/*
35 * Attempt to steal a page from a pipe buffer. This should perhaps go into
36 * a vm helper function; it's already simplified quite a bit by the
37 * addition of remove_mapping(). If success is returned, the caller may
38 * attempt to reuse this page for another destination.
39 */
76ad4d11 40static int page_cache_pipe_buf_steal(struct pipe_inode_info *pipe,
5abc97aa
JA
41 struct pipe_buffer *buf)
42{
43 struct page *page = buf->page;
9e94cd4f 44 struct address_space *mapping;
5abc97aa 45
9e0267c2
JA
46 lock_page(page);
47
9e94cd4f
JA
48 mapping = page_mapping(page);
49 if (mapping) {
50 WARN_ON(!PageUptodate(page));
5abc97aa 51
9e94cd4f
JA
52 /*
53 * At least for ext2 with nobh option, we need to wait on
54 * writeback completing on this page, since we'll remove it
55 * from the pagecache. Otherwise truncate won't wait on the
56 * page, allowing the disk blocks to be reused by someone else
57 * before we actually wrote our data to them. fs corruption
58 * ensues.
59 */
60 wait_on_page_writeback(page);
ad8d6f0a 61
266cf658
DH
62 if (page_has_private(page) &&
63 !try_to_release_page(page, GFP_KERNEL))
ca39d651 64 goto out_unlock;
4f6f0bd2 65
9e94cd4f
JA
66 /*
67 * If we succeeded in removing the mapping, set LRU flag
68 * and return good.
69 */
70 if (remove_mapping(mapping, page)) {
71 buf->flags |= PIPE_BUF_FLAG_LRU;
72 return 0;
73 }
9e0267c2 74 }
5abc97aa 75
9e94cd4f
JA
76 /*
77 * Raced with truncate or failed to remove page from current
78 * address space, unlock and return failure.
79 */
ca39d651 80out_unlock:
9e94cd4f
JA
81 unlock_page(page);
82 return 1;
5abc97aa
JA
83}
84
76ad4d11 85static void page_cache_pipe_buf_release(struct pipe_inode_info *pipe,
5274f052
JA
86 struct pipe_buffer *buf)
87{
88 page_cache_release(buf->page);
1432873a 89 buf->flags &= ~PIPE_BUF_FLAG_LRU;
5274f052
JA
90}
91
0845718d
JA
92/*
93 * Check whether the contents of buf is OK to access. Since the content
94 * is a page cache page, IO may be in flight.
95 */
cac36bb0
JA
96static int page_cache_pipe_buf_confirm(struct pipe_inode_info *pipe,
97 struct pipe_buffer *buf)
5274f052
JA
98{
99 struct page *page = buf->page;
49d0b21b 100 int err;
5274f052
JA
101
102 if (!PageUptodate(page)) {
49d0b21b
JA
103 lock_page(page);
104
105 /*
106 * Page got truncated/unhashed. This will cause a 0-byte
73d62d83 107 * splice, if this is the first page.
49d0b21b
JA
108 */
109 if (!page->mapping) {
110 err = -ENODATA;
111 goto error;
112 }
5274f052 113
49d0b21b 114 /*
73d62d83 115 * Uh oh, read-error from disk.
49d0b21b
JA
116 */
117 if (!PageUptodate(page)) {
118 err = -EIO;
119 goto error;
120 }
121
122 /*
f84d7519 123 * Page is ok after all, we are done.
49d0b21b 124 */
5274f052 125 unlock_page(page);
5274f052
JA
126 }
127
f84d7519 128 return 0;
49d0b21b
JA
129error:
130 unlock_page(page);
f84d7519 131 return err;
70524490
JA
132}
133
d4c3cca9 134static const struct pipe_buf_operations page_cache_pipe_buf_ops = {
5274f052 135 .can_merge = 0,
f84d7519
JA
136 .map = generic_pipe_buf_map,
137 .unmap = generic_pipe_buf_unmap,
cac36bb0 138 .confirm = page_cache_pipe_buf_confirm,
5274f052 139 .release = page_cache_pipe_buf_release,
5abc97aa 140 .steal = page_cache_pipe_buf_steal,
f84d7519 141 .get = generic_pipe_buf_get,
5274f052
JA
142};
143
912d35f8
JA
144static int user_page_pipe_buf_steal(struct pipe_inode_info *pipe,
145 struct pipe_buffer *buf)
146{
7afa6fd0
JA
147 if (!(buf->flags & PIPE_BUF_FLAG_GIFT))
148 return 1;
149
1432873a 150 buf->flags |= PIPE_BUF_FLAG_LRU;
330ab716 151 return generic_pipe_buf_steal(pipe, buf);
912d35f8
JA
152}
153
d4c3cca9 154static const struct pipe_buf_operations user_page_pipe_buf_ops = {
912d35f8 155 .can_merge = 0,
f84d7519
JA
156 .map = generic_pipe_buf_map,
157 .unmap = generic_pipe_buf_unmap,
cac36bb0 158 .confirm = generic_pipe_buf_confirm,
912d35f8
JA
159 .release = page_cache_pipe_buf_release,
160 .steal = user_page_pipe_buf_steal,
f84d7519 161 .get = generic_pipe_buf_get,
912d35f8
JA
162};
163
932cc6d4
JA
164/**
165 * splice_to_pipe - fill passed data into a pipe
166 * @pipe: pipe to fill
167 * @spd: data to fill
168 *
169 * Description:
79685b8d 170 * @spd contains a map of pages and len/offset tuples, along with
932cc6d4
JA
171 * the struct pipe_buf_operations associated with these pages. This
172 * function will link that data to the pipe.
173 *
83f9135b 174 */
d6b29d7c
JA
175ssize_t splice_to_pipe(struct pipe_inode_info *pipe,
176 struct splice_pipe_desc *spd)
5274f052 177{
00de00bd 178 unsigned int spd_pages = spd->nr_pages;
912d35f8 179 int ret, do_wakeup, page_nr;
5274f052
JA
180
181 ret = 0;
182 do_wakeup = 0;
912d35f8 183 page_nr = 0;
5274f052 184
61e0d47c 185 pipe_lock(pipe);
5274f052 186
5274f052 187 for (;;) {
3a326a2c 188 if (!pipe->readers) {
5274f052
JA
189 send_sig(SIGPIPE, current, 0);
190 if (!ret)
191 ret = -EPIPE;
192 break;
193 }
194
6f767b04
JA
195 if (pipe->nrbufs < PIPE_BUFFERS) {
196 int newbuf = (pipe->curbuf + pipe->nrbufs) & (PIPE_BUFFERS - 1);
3a326a2c 197 struct pipe_buffer *buf = pipe->bufs + newbuf;
5274f052 198
912d35f8
JA
199 buf->page = spd->pages[page_nr];
200 buf->offset = spd->partial[page_nr].offset;
201 buf->len = spd->partial[page_nr].len;
497f9625 202 buf->private = spd->partial[page_nr].private;
912d35f8 203 buf->ops = spd->ops;
7afa6fd0
JA
204 if (spd->flags & SPLICE_F_GIFT)
205 buf->flags |= PIPE_BUF_FLAG_GIFT;
206
6f767b04 207 pipe->nrbufs++;
912d35f8
JA
208 page_nr++;
209 ret += buf->len;
210
6f767b04
JA
211 if (pipe->inode)
212 do_wakeup = 1;
5274f052 213
912d35f8 214 if (!--spd->nr_pages)
5274f052 215 break;
6f767b04 216 if (pipe->nrbufs < PIPE_BUFFERS)
5274f052
JA
217 continue;
218
219 break;
220 }
221
912d35f8 222 if (spd->flags & SPLICE_F_NONBLOCK) {
29e35094
LT
223 if (!ret)
224 ret = -EAGAIN;
225 break;
226 }
227
5274f052
JA
228 if (signal_pending(current)) {
229 if (!ret)
230 ret = -ERESTARTSYS;
231 break;
232 }
233
234 if (do_wakeup) {
c0bd1f65 235 smp_mb();
3a326a2c
IM
236 if (waitqueue_active(&pipe->wait))
237 wake_up_interruptible_sync(&pipe->wait);
238 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
5274f052
JA
239 do_wakeup = 0;
240 }
241
3a326a2c
IM
242 pipe->waiting_writers++;
243 pipe_wait(pipe);
244 pipe->waiting_writers--;
5274f052
JA
245 }
246
61e0d47c 247 pipe_unlock(pipe);
5274f052 248
61e0d47c
MS
249 if (do_wakeup) {
250 smp_mb();
251 if (waitqueue_active(&pipe->wait))
252 wake_up_interruptible(&pipe->wait);
253 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
5274f052
JA
254 }
255
00de00bd 256 while (page_nr < spd_pages)
bbdfc2f7 257 spd->spd_release(spd, page_nr++);
5274f052
JA
258
259 return ret;
260}
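/*
 * A worked example of the ring arithmetic above, assuming the usual
 * PIPE_BUFFERS of 16: with pipe->curbuf == 14 and pipe->nrbufs == 3, the
 * occupied slots are 14, 15 and 0, so the next free slot is
 *
 *	newbuf = (14 + 3) & (16 - 1) = 17 & 15 = 1
 *
 * i.e. the buffer array simply wraps around; nothing is ever shifted or
 * reallocated, producers and consumers just advance curbuf and nrbufs.
 */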
261
bbdfc2f7
JA
262static void spd_release_page(struct splice_pipe_desc *spd, unsigned int i)
263{
264 page_cache_release(spd->pages[i]);
265}
266
3a326a2c 267static int
cbb7e577
JA
268__generic_file_splice_read(struct file *in, loff_t *ppos,
269 struct pipe_inode_info *pipe, size_t len,
270 unsigned int flags)
5274f052
JA
271{
272 struct address_space *mapping = in->f_mapping;
d8983910 273 unsigned int loff, nr_pages, req_pages;
16c523dd 274 struct page *pages[PIPE_BUFFERS];
912d35f8 275 struct partial_page partial[PIPE_BUFFERS];
5274f052 276 struct page *page;
91ad66ef
JA
277 pgoff_t index, end_index;
278 loff_t isize;
eb20796b 279 int error, page_nr;
912d35f8
JA
280 struct splice_pipe_desc spd = {
281 .pages = pages,
282 .partial = partial,
283 .flags = flags,
284 .ops = &page_cache_pipe_buf_ops,
bbdfc2f7 285 .spd_release = spd_release_page,
912d35f8 286 };
5274f052 287
cbb7e577 288 index = *ppos >> PAGE_CACHE_SHIFT;
912d35f8 289 loff = *ppos & ~PAGE_CACHE_MASK;
d8983910
FW
290 req_pages = (len + loff + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
291 nr_pages = min(req_pages, (unsigned)PIPE_BUFFERS);
5274f052 292
eb20796b
JA
293 /*
294 * Lookup the (hopefully) full range of pages we need.
295 */
296 spd.nr_pages = find_get_pages_contig(mapping, index, nr_pages, pages);
431a4820 297 index += spd.nr_pages;
82aa5d61 298
eb20796b
JA
299 /*
300 * If find_get_pages_contig() returned fewer pages than we needed,
431a4820 301 * readahead/allocate the rest and fill in the holes.
eb20796b 302 */
431a4820 303 if (spd.nr_pages < nr_pages)
cf914a7d
RR
304 page_cache_sync_readahead(mapping, &in->f_ra, in,
305 index, req_pages - spd.nr_pages);
431a4820 306
932cc6d4 307 error = 0;
eb20796b 308 while (spd.nr_pages < nr_pages) {
82aa5d61 309 /*
eb20796b
JA
310 * Page could be there, find_get_pages_contig() breaks on
311 * the first hole.
5274f052 312 */
7480a904
JA
313 page = find_get_page(mapping, index);
314 if (!page) {
7480a904 315 /*
eb20796b 316 * page didn't exist, allocate one.
7480a904
JA
317 */
318 page = page_cache_alloc_cold(mapping);
319 if (!page)
320 break;
321
322 error = add_to_page_cache_lru(page, mapping, index,
4cd13504 323 mapping_gfp_mask(mapping));
7480a904
JA
324 if (unlikely(error)) {
325 page_cache_release(page);
a0548871
JA
326 if (error == -EEXIST)
327 continue;
7480a904
JA
328 break;
329 }
eb20796b
JA
330 /*
331 * add_to_page_cache() locks the page, unlock it
332 * to avoid convoluting the logic below even more.
333 */
334 unlock_page(page);
7480a904
JA
335 }
336
eb20796b
JA
337 pages[spd.nr_pages++] = page;
338 index++;
339 }
340
341 /*
342 * Now loop over the map and see if we need to start IO on any
343 * pages, fill in the partial map, etc.
344 */
345 index = *ppos >> PAGE_CACHE_SHIFT;
346 nr_pages = spd.nr_pages;
347 spd.nr_pages = 0;
348 for (page_nr = 0; page_nr < nr_pages; page_nr++) {
349 unsigned int this_len;
350
351 if (!len)
352 break;
353
354 /*
355 * this_len is the max we'll use from this page
356 */
357 this_len = min_t(unsigned long, len, PAGE_CACHE_SIZE - loff);
358 page = pages[page_nr];
359
a08a166f 360 if (PageReadahead(page))
cf914a7d 361 page_cache_async_readahead(mapping, &in->f_ra, in,
d8983910 362 page, index, req_pages - page_nr);
a08a166f 363
7480a904
JA
364 /*
365 * If the page isn't uptodate, we may need to start io on it
366 */
367 if (!PageUptodate(page)) {
c4f895cb
JA
368 /*
369 * If in nonblock mode then don't block on waiting
370 * for an in-flight io page
371 */
9ae9d68c 372 if (flags & SPLICE_F_NONBLOCK) {
529ae9aa 373 if (!trylock_page(page)) {
8191ecd1 374 error = -EAGAIN;
9ae9d68c 375 break;
8191ecd1 376 }
9ae9d68c
FW
377 } else
378 lock_page(page);
7480a904
JA
379
380 /*
32502b84
MS
381 * Page was truncated, or invalidated by the
382 * filesystem. Redo the find/create, but this time the
383 * page is kept locked, so there's no chance of another
384 * race with truncate/invalidate.
7480a904
JA
385 */
386 if (!page->mapping) {
387 unlock_page(page);
32502b84
MS
388 page = find_or_create_page(mapping, index,
389 mapping_gfp_mask(mapping));
390
391 if (!page) {
392 error = -ENOMEM;
393 break;
394 }
395 page_cache_release(pages[page_nr]);
396 pages[page_nr] = page;
7480a904
JA
397 }
398 /*
399 * page was already under io and is now done, great
400 */
401 if (PageUptodate(page)) {
402 unlock_page(page);
403 goto fill_it;
404 }
5274f052 405
7480a904
JA
406 /*
407 * need to read in the page
408 */
409 error = mapping->a_ops->readpage(in, page);
5274f052 410 if (unlikely(error)) {
eb20796b
JA
411 /*
412 * We really should re-lookup the page here,
413 * but it complicates things a lot. Instead
414 * lets just do what we already stored, and
415 * we'll get it the next time we are called.
416 */
7480a904 417 if (error == AOP_TRUNCATED_PAGE)
eb20796b
JA
418 error = 0;
419
5274f052
JA
420 break;
421 }
620a324b
JA
422 }
423fill_it:
424 /*
425 * i_size must be checked after PageUptodate.
426 */
427 isize = i_size_read(mapping->host);
428 end_index = (isize - 1) >> PAGE_CACHE_SHIFT;
429 if (unlikely(!isize || index > end_index))
430 break;
431
432 /*
433 * if this is the last page, see if we need to shrink
434 * the length and stop
435 */
436 if (end_index == index) {
437 unsigned int plen;
91ad66ef
JA
438
439 /*
620a324b 440 * max good bytes in this page
91ad66ef 441 */
620a324b
JA
442 plen = ((isize - 1) & ~PAGE_CACHE_MASK) + 1;
443 if (plen <= loff)
91ad66ef 444 break;
91ad66ef
JA
445
446 /*
620a324b 447 * force quit after adding this page
91ad66ef 448 */
620a324b
JA
449 this_len = min(this_len, plen - loff);
450 len = this_len;
5274f052 451 }
620a324b 452
eb20796b
JA
453 partial[page_nr].offset = loff;
454 partial[page_nr].len = this_len;
82aa5d61 455 len -= this_len;
91ad66ef 456 loff = 0;
eb20796b
JA
457 spd.nr_pages++;
458 index++;
5274f052
JA
459 }
460
eb20796b 461 /*
475ecade 462 * Release any pages at the end, if we quit early. 'page_nr' is how far
eb20796b
JA
463 * we got, 'nr_pages' is how many pages are in the map.
464 */
465 while (page_nr < nr_pages)
466 page_cache_release(pages[page_nr++]);
f4e6b498 467 in->f_ra.prev_pos = (loff_t)index << PAGE_CACHE_SHIFT;
eb20796b 468
912d35f8 469 if (spd.nr_pages)
00522fb4 470 return splice_to_pipe(pipe, &spd);
5274f052 471
7480a904 472 return error;
5274f052
JA
473}
474
83f9135b
JA
475/**
476 * generic_file_splice_read - splice data from file to a pipe
477 * @in: file to splice from
932cc6d4 478 * @ppos: position in @in
83f9135b
JA
479 * @pipe: pipe to splice to
480 * @len: number of bytes to splice
481 * @flags: splice modifier flags
482 *
932cc6d4
JA
483 * Description:
484 * Will read pages from given file and fill them into a pipe. Can be
485 * used as long as the address_space operations for the source implements
486 * a readpage() hook.
487 *
83f9135b 488 */
cbb7e577
JA
489ssize_t generic_file_splice_read(struct file *in, loff_t *ppos,
490 struct pipe_inode_info *pipe, size_t len,
491 unsigned int flags)
5274f052 492{
d366d398 493 loff_t isize, left;
8191ecd1 494 int ret;
d366d398
JA
495
496 isize = i_size_read(in->f_mapping->host);
497 if (unlikely(*ppos >= isize))
498 return 0;
499
500 left = isize - *ppos;
501 if (unlikely(left < len))
502 len = left;
5274f052 503
8191ecd1
JA
504 ret = __generic_file_splice_read(in, ppos, pipe, len, flags);
505 if (ret > 0)
cbb7e577 506 *ppos += ret;
5274f052
JA
507
508 return ret;
509}
059a8f37
JA
510EXPORT_SYMBOL(generic_file_splice_read);
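/*
 * Typical wiring, shown here only as a sketch: a filesystem that is happy
 * with the generic page-cache path just points its file_operations at the
 * helpers in this file (the ->splice_read/->splice_write hooks are what
 * do_splice_to()/do_splice_from() below look up). "example_fops" and the
 * read/write entries are illustrative only.
 *
 *	const struct file_operations example_fops = {
 *		.read		= do_sync_read,
 *		.write		= do_sync_write,
 *		.splice_read	= generic_file_splice_read,
 *		.splice_write	= generic_file_splice_write,
 *	};
 */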
511
6818173b
MS
512static const struct pipe_buf_operations default_pipe_buf_ops = {
513 .can_merge = 0,
514 .map = generic_pipe_buf_map,
515 .unmap = generic_pipe_buf_unmap,
516 .confirm = generic_pipe_buf_confirm,
517 .release = generic_pipe_buf_release,
518 .steal = generic_pipe_buf_steal,
519 .get = generic_pipe_buf_get,
520};
521
522static ssize_t kernel_readv(struct file *file, const struct iovec *vec,
523 unsigned long vlen, loff_t offset)
524{
525 mm_segment_t old_fs;
526 loff_t pos = offset;
527 ssize_t res;
528
529 old_fs = get_fs();
530 set_fs(get_ds());
531 /* The cast to a user pointer is valid due to the set_fs() */
532 res = vfs_readv(file, (const struct iovec __user *)vec, vlen, &pos);
533 set_fs(old_fs);
534
535 return res;
536}
537
0b0a47f5
MS
538static ssize_t kernel_writev(struct file *file, const struct iovec *vec,
539 unsigned long vlen, loff_t *ppos)
540{
541 mm_segment_t old_fs;
542 ssize_t res;
543
544 old_fs = get_fs();
545 set_fs(get_ds());
546 /* The cast to a user pointer is valid due to the set_fs() */
547 res = vfs_writev(file, (const struct iovec __user *)vec, vlen, ppos);
548 set_fs(old_fs);
549
550 return res;
551}
552
6818173b
MS
553ssize_t default_file_splice_read(struct file *in, loff_t *ppos,
554 struct pipe_inode_info *pipe, size_t len,
555 unsigned int flags)
556{
557 unsigned int nr_pages;
558 unsigned int nr_freed;
559 size_t offset;
560 struct page *pages[PIPE_BUFFERS];
561 struct partial_page partial[PIPE_BUFFERS];
562 struct iovec vec[PIPE_BUFFERS];
563 pgoff_t index;
564 ssize_t res;
565 size_t this_len;
566 int error;
567 int i;
568 struct splice_pipe_desc spd = {
569 .pages = pages,
570 .partial = partial,
571 .flags = flags,
572 .ops = &default_pipe_buf_ops,
573 .spd_release = spd_release_page,
574 };
575
576 index = *ppos >> PAGE_CACHE_SHIFT;
577 offset = *ppos & ~PAGE_CACHE_MASK;
578 nr_pages = (len + offset + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
579
580 for (i = 0; i < nr_pages && i < PIPE_BUFFERS && len; i++) {
581 struct page *page;
582
4f231228 583 page = alloc_page(GFP_USER);
6818173b
MS
584 error = -ENOMEM;
585 if (!page)
586 goto err;
587
588 this_len = min_t(size_t, len, PAGE_CACHE_SIZE - offset);
4f231228 589 vec[i].iov_base = (void __user *) page_address(page);
6818173b
MS
590 vec[i].iov_len = this_len;
591 pages[i] = page;
592 spd.nr_pages++;
593 len -= this_len;
594 offset = 0;
595 }
596
597 res = kernel_readv(in, vec, spd.nr_pages, *ppos);
598 if (res < 0)
599 goto err;
600
601 error = 0;
602 if (!res)
603 goto err;
604
605 nr_freed = 0;
606 for (i = 0; i < spd.nr_pages; i++) {
6818173b
MS
607 this_len = min_t(size_t, vec[i].iov_len, res);
608 partial[i].offset = 0;
609 partial[i].len = this_len;
610 if (!this_len) {
611 __free_page(pages[i]);
612 pages[i] = NULL;
613 nr_freed++;
614 }
615 res -= this_len;
616 }
617 spd.nr_pages -= nr_freed;
618
619 res = splice_to_pipe(pipe, &spd);
620 if (res > 0)
621 *ppos += res;
622
623 return res;
624
625err:
4f231228 626 for (i = 0; i < spd.nr_pages; i++)
6818173b 627 __free_page(pages[i]);
4f231228 628
6818173b
MS
629 return error;
630}
631EXPORT_SYMBOL(default_file_splice_read);
632
5274f052 633/*
4f6f0bd2 634 * Send 'sd->len' bytes to socket from 'sd->file' at position 'sd->pos'
016b661e 635 * using sendpage(). Return the number of bytes sent.
5274f052 636 */
76ad4d11 637static int pipe_to_sendpage(struct pipe_inode_info *pipe,
5274f052
JA
638 struct pipe_buffer *buf, struct splice_desc *sd)
639{
6a14b90b 640 struct file *file = sd->u.file;
5274f052 641 loff_t pos = sd->pos;
f84d7519 642 int ret, more;
5274f052 643
cac36bb0 644 ret = buf->ops->confirm(pipe, buf);
f84d7519
JA
645 if (!ret) {
646 more = (sd->flags & SPLICE_F_MORE) || sd->len < sd->total_len;
5274f052 647
f84d7519
JA
648 ret = file->f_op->sendpage(file, buf->page, buf->offset,
649 sd->len, &pos, more);
650 }
5274f052 651
016b661e 652 return ret;
5274f052
JA
653}
654
655/*
656 * This is a little more tricky than the file -> pipe splicing. There are
657 * basically three cases:
658 *
659 * - Destination page already exists in the address space and there
660 * are users of it. For that case we have no other option than
661 * copying the data. Tough luck.
662 * - Destination page already exists in the address space, but there
663 * are no users of it. Make sure it's uptodate, then drop it. Fall
664 * through to last case.
665 * - Destination page does not exist, we can add the pipe page to
666 * the page cache and avoid the copy.
667 *
83f9135b
JA
668 * If asked to move pages to the output file (SPLICE_F_MOVE is set in
669 * sd->flags), we attempt to migrate pages from the pipe to the output
670 * file address space page cache. This is possible if no one else has
671 * the pipe page referenced outside of the pipe and page cache. If
672 * SPLICE_F_MOVE isn't set, or we cannot move the page, we simply create
673 * a new page in the output file page cache and fill/dirty that.
5274f052 674 */
328eaaba
MS
675int pipe_to_file(struct pipe_inode_info *pipe, struct pipe_buffer *buf,
676 struct splice_desc *sd)
5274f052 677{
6a14b90b 678 struct file *file = sd->u.file;
5274f052 679 struct address_space *mapping = file->f_mapping;
016b661e 680 unsigned int offset, this_len;
5274f052 681 struct page *page;
afddba49 682 void *fsdata;
3e7ee3e7 683 int ret;
5274f052
JA
684
685 /*
49d0b21b 686 * make sure the data in this buffer is uptodate
5274f052 687 */
cac36bb0 688 ret = buf->ops->confirm(pipe, buf);
f84d7519
JA
689 if (unlikely(ret))
690 return ret;
5274f052 691
5274f052
JA
692 offset = sd->pos & ~PAGE_CACHE_MASK;
693
016b661e
JA
694 this_len = sd->len;
695 if (this_len + offset > PAGE_CACHE_SIZE)
696 this_len = PAGE_CACHE_SIZE - offset;
697
afddba49
NP
698 ret = pagecache_write_begin(file, mapping, sd->pos, this_len,
699 AOP_FLAG_UNINTERRUPTIBLE, &page, &fsdata);
700 if (unlikely(ret))
701 goto out;
5274f052 702
0568b409 703 if (buf->page != page) {
f84d7519
JA
704 /*
705 * Careful, ->map() uses KM_USER0!
706 */
76ad4d11 707 char *src = buf->ops->map(pipe, buf, 1);
f84d7519 708 char *dst = kmap_atomic(page, KM_USER1);
5abc97aa 709
016b661e 710 memcpy(dst + offset, src + buf->offset, this_len);
5abc97aa 711 flush_dcache_page(page);
f84d7519 712 kunmap_atomic(dst, KM_USER1);
76ad4d11 713 buf->ops->unmap(pipe, buf, src);
5abc97aa 714 }
afddba49
NP
715 ret = pagecache_write_end(file, mapping, sd->pos, this_len, this_len,
716 page, fsdata);
5274f052 717out:
5274f052
JA
718 return ret;
719}
328eaaba 720EXPORT_SYMBOL(pipe_to_file);
5274f052 721
b3c2d2dd
MS
722static void wakeup_pipe_writers(struct pipe_inode_info *pipe)
723{
724 smp_mb();
725 if (waitqueue_active(&pipe->wait))
726 wake_up_interruptible(&pipe->wait);
727 kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
728}
729
932cc6d4 730/**
b3c2d2dd 731 * splice_from_pipe_feed - feed available data from a pipe to a file
932cc6d4
JA
732 * @pipe: pipe to splice from
733 * @sd: information to @actor
734 * @actor: handler that splices the data
735 *
736 * Description:
b3c2d2dd
MS
737 * This function loops over the pipe and calls @actor to do the
738 * actual moving of a single struct pipe_buffer to the desired
739 * destination. It returns when there are no more buffers left in
740 * the pipe or if the requested number of bytes (@sd->total_len)
741 * has been copied. It returns a positive number (one) if the
742 * pipe needs to be filled with more data, zero if the required
743 * number of bytes have been copied and -errno on error.
932cc6d4 744 *
b3c2d2dd
MS
745 * This, together with splice_from_pipe_{begin,end,next}, may be
746 * used to implement the functionality of __splice_from_pipe() when
747 * locking is required around copying the pipe buffers to the
748 * destination.
83f9135b 749 */
b3c2d2dd
MS
750int splice_from_pipe_feed(struct pipe_inode_info *pipe, struct splice_desc *sd,
751 splice_actor *actor)
5274f052 752{
b3c2d2dd 753 int ret;
5274f052 754
b3c2d2dd
MS
755 while (pipe->nrbufs) {
756 struct pipe_buffer *buf = pipe->bufs + pipe->curbuf;
757 const struct pipe_buf_operations *ops = buf->ops;
5274f052 758
b3c2d2dd
MS
759 sd->len = buf->len;
760 if (sd->len > sd->total_len)
761 sd->len = sd->total_len;
5274f052 762
b3c2d2dd
MS
763 ret = actor(pipe, buf, sd);
764 if (ret <= 0) {
765 if (ret == -ENODATA)
766 ret = 0;
767 return ret;
768 }
769 buf->offset += ret;
770 buf->len -= ret;
771
772 sd->num_spliced += ret;
773 sd->len -= ret;
774 sd->pos += ret;
775 sd->total_len -= ret;
776
777 if (!buf->len) {
778 buf->ops = NULL;
779 ops->release(pipe, buf);
780 pipe->curbuf = (pipe->curbuf + 1) & (PIPE_BUFFERS - 1);
781 pipe->nrbufs--;
782 if (pipe->inode)
783 sd->need_wakeup = true;
784 }
5274f052 785
b3c2d2dd
MS
786 if (!sd->total_len)
787 return 0;
788 }
5274f052 789
b3c2d2dd
MS
790 return 1;
791}
792EXPORT_SYMBOL(splice_from_pipe_feed);
5274f052 793
b3c2d2dd
MS
794/**
795 * splice_from_pipe_next - wait for some data to splice from
796 * @pipe: pipe to splice from
797 * @sd: information about the splice operation
798 *
799 * Description:
800 * This function will wait for some data and return a positive
801 * value (one) if pipe buffers are available. It will return zero
802 * or -errno if no more data needs to be spliced.
803 */
804int splice_from_pipe_next(struct pipe_inode_info *pipe, struct splice_desc *sd)
805{
806 while (!pipe->nrbufs) {
807 if (!pipe->writers)
808 return 0;
016b661e 809
b3c2d2dd
MS
810 if (!pipe->waiting_writers && sd->num_spliced)
811 return 0;
73d62d83 812
b3c2d2dd
MS
813 if (sd->flags & SPLICE_F_NONBLOCK)
814 return -EAGAIN;
5274f052 815
b3c2d2dd
MS
816 if (signal_pending(current))
817 return -ERESTARTSYS;
5274f052 818
b3c2d2dd
MS
819 if (sd->need_wakeup) {
820 wakeup_pipe_writers(pipe);
821 sd->need_wakeup = false;
5274f052
JA
822 }
823
b3c2d2dd
MS
824 pipe_wait(pipe);
825 }
29e35094 826
b3c2d2dd
MS
827 return 1;
828}
829EXPORT_SYMBOL(splice_from_pipe_next);
5274f052 830
b3c2d2dd
MS
831/**
832 * splice_from_pipe_begin - start splicing from pipe
b80901bb 833 * @sd: information about the splice operation
b3c2d2dd
MS
834 *
835 * Description:
836 * This function should be called before a loop containing
837 * splice_from_pipe_next() and splice_from_pipe_feed() to
838 * initialize the necessary fields of @sd.
839 */
840void splice_from_pipe_begin(struct splice_desc *sd)
841{
842 sd->num_spliced = 0;
843 sd->need_wakeup = false;
844}
845EXPORT_SYMBOL(splice_from_pipe_begin);
5274f052 846
b3c2d2dd
MS
847/**
848 * splice_from_pipe_end - finish splicing from pipe
849 * @pipe: pipe to splice from
850 * @sd: information about the splice operation
851 *
852 * Description:
853 * This function will wake up pipe writers if necessary. It should
854 * be called after a loop containing splice_from_pipe_next() and
855 * splice_from_pipe_feed().
856 */
857void splice_from_pipe_end(struct pipe_inode_info *pipe, struct splice_desc *sd)
858{
859 if (sd->need_wakeup)
860 wakeup_pipe_writers(pipe);
861}
862EXPORT_SYMBOL(splice_from_pipe_end);
5274f052 863
b3c2d2dd
MS
864/**
865 * __splice_from_pipe - splice data from a pipe to given actor
866 * @pipe: pipe to splice from
867 * @sd: information to @actor
868 * @actor: handler that splices the data
869 *
870 * Description:
871 * This function does little more than loop over the pipe and call
872 * @actor to do the actual moving of a single struct pipe_buffer to
873 * the desired destination. See pipe_to_file, pipe_to_sendpage, or
874 * pipe_to_user.
875 *
876 */
877ssize_t __splice_from_pipe(struct pipe_inode_info *pipe, struct splice_desc *sd,
878 splice_actor *actor)
879{
880 int ret;
5274f052 881
b3c2d2dd
MS
882 splice_from_pipe_begin(sd);
883 do {
884 ret = splice_from_pipe_next(pipe, sd);
885 if (ret > 0)
886 ret = splice_from_pipe_feed(pipe, sd, actor);
887 } while (ret > 0);
888 splice_from_pipe_end(pipe, sd);
889
890 return sd->num_spliced ? sd->num_spliced : ret;
5274f052 891}
40bee44e 892EXPORT_SYMBOL(__splice_from_pipe);
5274f052 893
932cc6d4
JA
894/**
895 * splice_from_pipe - splice data from a pipe to a file
896 * @pipe: pipe to splice from
897 * @out: file to splice to
898 * @ppos: position in @out
899 * @len: how many bytes to splice
900 * @flags: splice modifier flags
901 * @actor: handler that splices the data
902 *
903 * Description:
2933970b 904 * See __splice_from_pipe. This function locks the pipe,
932cc6d4
JA
905 * otherwise it's identical to __splice_from_pipe().
906 *
907 */
6da61809
MF
908ssize_t splice_from_pipe(struct pipe_inode_info *pipe, struct file *out,
909 loff_t *ppos, size_t len, unsigned int flags,
910 splice_actor *actor)
911{
912 ssize_t ret;
c66ab6fa
JA
913 struct splice_desc sd = {
914 .total_len = len,
915 .flags = flags,
916 .pos = *ppos,
6a14b90b 917 .u.file = out,
c66ab6fa 918 };
6da61809 919
61e0d47c 920 pipe_lock(pipe);
c66ab6fa 921 ret = __splice_from_pipe(pipe, &sd, actor);
61e0d47c 922 pipe_unlock(pipe);
6da61809
MF
923
924 return ret;
925}
926
83f9135b
JA
927/**
928 * generic_file_splice_write - splice data from a pipe to a file
3a326a2c 929 * @pipe: pipe info
83f9135b 930 * @out: file to write to
932cc6d4 931 * @ppos: position in @out
83f9135b
JA
932 * @len: number of bytes to splice
933 * @flags: splice modifier flags
934 *
932cc6d4
JA
935 * Description:
936 * Will either move or copy pages (determined by @flags options) from
937 * the given pipe inode to the given file.
83f9135b
JA
938 *
939 */
3a326a2c
IM
940ssize_t
941generic_file_splice_write(struct pipe_inode_info *pipe, struct file *out,
cbb7e577 942 loff_t *ppos, size_t len, unsigned int flags)
5274f052 943{
4f6f0bd2 944 struct address_space *mapping = out->f_mapping;
8c34e2d6 945 struct inode *inode = mapping->host;
7f3d4ee1
MS
946 struct splice_desc sd = {
947 .total_len = len,
948 .flags = flags,
949 .pos = *ppos,
950 .u.file = out,
951 };
3a326a2c
IM
952 ssize_t ret;
953
61e0d47c 954 pipe_lock(pipe);
eb443e5a
MS
955
956 splice_from_pipe_begin(&sd);
957 do {
958 ret = splice_from_pipe_next(pipe, &sd);
959 if (ret <= 0)
960 break;
961
962 mutex_lock_nested(&inode->i_mutex, I_MUTEX_CHILD);
963 ret = file_remove_suid(out);
964 if (!ret)
965 ret = splice_from_pipe_feed(pipe, &sd, pipe_to_file);
966 mutex_unlock(&inode->i_mutex);
967 } while (ret > 0);
968 splice_from_pipe_end(pipe, &sd);
969
61e0d47c 970 pipe_unlock(pipe);
eb443e5a
MS
971
972 if (sd.num_spliced)
973 ret = sd.num_spliced;
974
a4514ebd 975 if (ret > 0) {
17ee4f49
JA
976 unsigned long nr_pages;
977
a4514ebd 978 *ppos += ret;
17ee4f49 979 nr_pages = (ret + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
a4514ebd
JA
980
981 /*
982 * If file or inode is SYNC and we actually wrote some data,
983 * sync it.
984 */
985 if (unlikely((out->f_flags & O_SYNC) || IS_SYNC(inode))) {
7f3d4ee1
MS
986 int err;
987
a4514ebd
JA
988 mutex_lock(&inode->i_mutex);
989 err = generic_osync_inode(inode, mapping,
990 OSYNC_METADATA|OSYNC_DATA);
991 mutex_unlock(&inode->i_mutex);
4f6f0bd2 992
a4514ebd
JA
993 if (err)
994 ret = err;
995 }
17ee4f49 996 balance_dirty_pages_ratelimited_nr(mapping, nr_pages);
4f6f0bd2
JA
997 }
998
999 return ret;
5274f052
JA
1000}
1001
059a8f37
JA
1002EXPORT_SYMBOL(generic_file_splice_write);
1003
0b0a47f5
MS
1004static struct pipe_buffer *nth_pipe_buf(struct pipe_inode_info *pipe, int n)
1005{
1006 return &pipe->bufs[(pipe->curbuf + n) % PIPE_BUFFERS];
1007}
1008
1009static ssize_t default_file_splice_write(struct pipe_inode_info *pipe,
1010 struct file *out, loff_t *ppos,
1011 size_t len, unsigned int flags)
1012{
1013 ssize_t ret = 0;
1014 ssize_t total_len = 0;
1015 int do_wakeup = 0;
1016
1017 pipe_lock(pipe);
1018 while (len) {
1019 struct pipe_buffer *buf;
1020 void *data[PIPE_BUFFERS];
1021 struct iovec vec[PIPE_BUFFERS];
1022 unsigned int nr_pages = 0;
1023 unsigned int write_len = 0;
1024 unsigned int now_len = len;
1025 unsigned int this_len;
1026 int i;
1027
1028 BUG_ON(pipe->nrbufs > PIPE_BUFFERS);
1029 for (i = 0; i < pipe->nrbufs && now_len; i++) {
1030 buf = nth_pipe_buf(pipe, i);
1031
1032 ret = buf->ops->confirm(pipe, buf);
1033 if (ret)
1034 break;
1035
1036 data[i] = buf->ops->map(pipe, buf, 0);
1037 this_len = min(buf->len, now_len);
1038 vec[i].iov_base = (void __user *) data[i] + buf->offset;
1039 vec[i].iov_len = this_len;
1040 now_len -= this_len;
1041 write_len += this_len;
1042 nr_pages++;
1043 }
1044
1045 if (nr_pages) {
1046 ret = kernel_writev(out, vec, nr_pages, ppos);
1047 if (ret == 0)
1048 ret = -EIO;
1049 if (ret > 0) {
1050 len -= ret;
1051 total_len += ret;
1052 }
1053 }
1054
1055 for (i = 0; i < nr_pages; i++) {
1056 buf = nth_pipe_buf(pipe, i);
1057 buf->ops->unmap(pipe, buf, data[i]);
1058
1059 if (ret > 0) {
1060 this_len = min_t(unsigned, vec[i].iov_len, ret);
1061 buf->offset += this_len;
1062 buf->len -= this_len;
1063 ret -= this_len;
1064 }
1065 }
1066
1067 if (ret < 0)
1068 break;
1069
1070 while (pipe->nrbufs) {
1071 const struct pipe_buf_operations *ops;
1072
1073 buf = nth_pipe_buf(pipe, 0);
1074 if (buf->len)
1075 break;
1076
1077 ops = buf->ops;
1078 buf->ops = NULL;
1079 ops->release(pipe, buf);
1080 pipe->curbuf = (pipe->curbuf + 1) % PIPE_BUFFERS;
1081 pipe->nrbufs--;
1082 if (pipe->inode)
1083 do_wakeup = 1;
1084 }
1085
1086 if (pipe->nrbufs)
1087 continue;
1088 if (!pipe->writers)
1089 break;
1090 if (!pipe->waiting_writers) {
1091 if (total_len)
1092 break;
1093 }
1094
1095 if (flags & SPLICE_F_NONBLOCK) {
1096 ret = -EAGAIN;
1097 break;
1098 }
1099
1100 if (signal_pending(current)) {
1101 ret = -ERESTARTSYS;
1102 break;
1103 }
1104
1105 if (do_wakeup) {
1106 wakeup_pipe_writers(pipe);
1107 do_wakeup = 0;
1108 }
1109
1110 pipe_wait(pipe);
1111 }
1112 pipe_unlock(pipe);
1113
1114 if (do_wakeup)
1115 wakeup_pipe_writers(pipe);
1116
1117 return total_len ? total_len : ret;
1118}
1119
83f9135b
JA
1120/**
1121 * generic_splice_sendpage - splice data from a pipe to a socket
932cc6d4 1122 * @pipe: pipe to splice from
83f9135b 1123 * @out: socket to write to
932cc6d4 1124 * @ppos: position in @out
83f9135b
JA
1125 * @len: number of bytes to splice
1126 * @flags: splice modifier flags
1127 *
932cc6d4
JA
1128 * Description:
1129 * Will send @len bytes from the pipe to a network socket. No data copying
1130 * is involved.
83f9135b
JA
1131 *
1132 */
3a326a2c 1133ssize_t generic_splice_sendpage(struct pipe_inode_info *pipe, struct file *out,
cbb7e577 1134 loff_t *ppos, size_t len, unsigned int flags)
5274f052 1135{
00522fb4 1136 return splice_from_pipe(pipe, out, ppos, len, flags, pipe_to_sendpage);
5274f052
JA
1137}
1138
059a8f37 1139EXPORT_SYMBOL(generic_splice_sendpage);
a0f06780 1140
83f9135b
JA
1141/*
1142 * Attempt to initiate a splice from pipe to file.
1143 */
3a326a2c 1144static long do_splice_from(struct pipe_inode_info *pipe, struct file *out,
cbb7e577 1145 loff_t *ppos, size_t len, unsigned int flags)
5274f052 1146{
0b0a47f5
MS
1147 ssize_t (*splice_write)(struct pipe_inode_info *, struct file *,
1148 loff_t *, size_t, unsigned int);
5274f052
JA
1149 int ret;
1150
49570e9b 1151 if (unlikely(!(out->f_mode & FMODE_WRITE)))
5274f052
JA
1152 return -EBADF;
1153
efc968d4
LT
1154 if (unlikely(out->f_flags & O_APPEND))
1155 return -EINVAL;
1156
cbb7e577 1157 ret = rw_verify_area(WRITE, out, ppos, len);
5274f052
JA
1158 if (unlikely(ret < 0))
1159 return ret;
1160
0b0a47f5
MS
1161 splice_write = out->f_op->splice_write;
1162 if (!splice_write)
1163 splice_write = default_file_splice_write;
1164
1165 return splice_write(pipe, out, ppos, len, flags);
5274f052
JA
1166}
1167
83f9135b
JA
1168/*
1169 * Attempt to initiate a splice from a file to a pipe.
1170 */
cbb7e577
JA
1171static long do_splice_to(struct file *in, loff_t *ppos,
1172 struct pipe_inode_info *pipe, size_t len,
1173 unsigned int flags)
5274f052 1174{
6818173b
MS
1175 ssize_t (*splice_read)(struct file *, loff_t *,
1176 struct pipe_inode_info *, size_t, unsigned int);
5274f052
JA
1177 int ret;
1178
49570e9b 1179 if (unlikely(!(in->f_mode & FMODE_READ)))
5274f052
JA
1180 return -EBADF;
1181
cbb7e577 1182 ret = rw_verify_area(READ, in, ppos, len);
5274f052
JA
1183 if (unlikely(ret < 0))
1184 return ret;
1185
6818173b
MS
1186 splice_read = in->f_op->splice_read;
1187 if (!splice_read)
1188 splice_read = default_file_splice_read;
1189
1190 return splice_read(in, ppos, pipe, len, flags);
5274f052
JA
1191}
1192
932cc6d4
JA
1193/**
1194 * splice_direct_to_actor - splices data directly between two non-pipes
1195 * @in: file to splice from
1196 * @sd: actor information on where to splice to
1197 * @actor: handles the data splicing
1198 *
1199 * Description:
1200 * This is a special case helper to splice directly between two
1201 * points, without requiring an explicit pipe. Internally an allocated
79685b8d 1202 * pipe is cached in the process, and reused during the lifetime of
932cc6d4
JA
1203 * that process.
1204 *
c66ab6fa
JA
1205 */
1206ssize_t splice_direct_to_actor(struct file *in, struct splice_desc *sd,
1207 splice_direct_actor *actor)
b92ce558
JA
1208{
1209 struct pipe_inode_info *pipe;
1210 long ret, bytes;
1211 umode_t i_mode;
c66ab6fa
JA
1212 size_t len;
1213 int i, flags;
b92ce558
JA
1214
1215 /*
1216 * We require the input to be a regular file, as we don't want to
1217 * randomly drop data for e.g. socket -> socket splicing. Use the
1218 * piped splicing for that!
1219 */
0f7fc9e4 1220 i_mode = in->f_path.dentry->d_inode->i_mode;
b92ce558
JA
1221 if (unlikely(!S_ISREG(i_mode) && !S_ISBLK(i_mode)))
1222 return -EINVAL;
1223
1224 /*
1225 * neither in nor out is a pipe, set up an internal pipe attached to
1226 * 'out' and transfer the wanted data from 'in' to 'out' through that
1227 */
1228 pipe = current->splice_pipe;
49570e9b 1229 if (unlikely(!pipe)) {
b92ce558
JA
1230 pipe = alloc_pipe_info(NULL);
1231 if (!pipe)
1232 return -ENOMEM;
1233
1234 /*
1235 * We don't have an immediate reader, but we'll read the stuff
00522fb4 1236 * out of the pipe right after the splice_to_pipe(). So set
b92ce558
JA
1237 * PIPE_READERS appropriately.
1238 */
1239 pipe->readers = 1;
1240
1241 current->splice_pipe = pipe;
1242 }
1243
1244 /*
73d62d83 1245 * Do the splice.
b92ce558
JA
1246 */
1247 ret = 0;
1248 bytes = 0;
c66ab6fa
JA
1249 len = sd->total_len;
1250 flags = sd->flags;
1251
1252 /*
1253 * Don't block on output, we have to drain the direct pipe.
1254 */
1255 sd->flags &= ~SPLICE_F_NONBLOCK;
b92ce558
JA
1256
1257 while (len) {
51a92c0f 1258 size_t read_len;
a82c53a0 1259 loff_t pos = sd->pos, prev_pos = pos;
b92ce558 1260
bcd4f3ac 1261 ret = do_splice_to(in, &pos, pipe, len, flags);
51a92c0f 1262 if (unlikely(ret <= 0))
b92ce558
JA
1263 goto out_release;
1264
1265 read_len = ret;
c66ab6fa 1266 sd->total_len = read_len;
b92ce558
JA
1267
1268 /*
1269 * NOTE: nonblocking mode only applies to the input. We
1270 * must not do the output in nonblocking mode as then we
1271 * could get stuck data in the internal pipe:
1272 */
c66ab6fa 1273 ret = actor(pipe, sd);
a82c53a0
TZ
1274 if (unlikely(ret <= 0)) {
1275 sd->pos = prev_pos;
b92ce558 1276 goto out_release;
a82c53a0 1277 }
b92ce558
JA
1278
1279 bytes += ret;
1280 len -= ret;
bcd4f3ac 1281 sd->pos = pos;
b92ce558 1282
a82c53a0
TZ
1283 if (ret < read_len) {
1284 sd->pos = prev_pos + ret;
51a92c0f 1285 goto out_release;
a82c53a0 1286 }
b92ce558
JA
1287 }
1288
9e97198d 1289done:
b92ce558 1290 pipe->nrbufs = pipe->curbuf = 0;
80848708 1291 file_accessed(in);
b92ce558
JA
1292 return bytes;
1293
1294out_release:
1295 /*
1296 * If we did an incomplete transfer we must release
1297 * the pipe buffers in question:
1298 */
1299 for (i = 0; i < PIPE_BUFFERS; i++) {
1300 struct pipe_buffer *buf = pipe->bufs + i;
1301
1302 if (buf->ops) {
1303 buf->ops->release(pipe, buf);
1304 buf->ops = NULL;
1305 }
1306 }
b92ce558 1307
9e97198d
JA
1308 if (!bytes)
1309 bytes = ret;
c66ab6fa 1310
9e97198d 1311 goto done;
c66ab6fa
JA
1312}
1313EXPORT_SYMBOL(splice_direct_to_actor);
1314
1315static int direct_splice_actor(struct pipe_inode_info *pipe,
1316 struct splice_desc *sd)
1317{
6a14b90b 1318 struct file *file = sd->u.file;
c66ab6fa
JA
1319
1320 return do_splice_from(pipe, file, &sd->pos, sd->total_len, sd->flags);
1321}
1322
932cc6d4
JA
1323/**
1324 * do_splice_direct - splices data directly between two files
1325 * @in: file to splice from
1326 * @ppos: input file offset
1327 * @out: file to splice to
1328 * @len: number of bytes to splice
1329 * @flags: splice modifier flags
1330 *
1331 * Description:
1332 * For use by do_sendfile(). splice can easily emulate sendfile, but
1333 * doing it in the application would incur an extra system call
1334 * (splice in + splice out, as compared to just sendfile()). So this helper
1335 * can splice directly through a process-private pipe.
1336 *
1337 */
c66ab6fa
JA
1338long do_splice_direct(struct file *in, loff_t *ppos, struct file *out,
1339 size_t len, unsigned int flags)
1340{
1341 struct splice_desc sd = {
1342 .len = len,
1343 .total_len = len,
1344 .flags = flags,
1345 .pos = *ppos,
6a14b90b 1346 .u.file = out,
c66ab6fa 1347 };
51a92c0f 1348 long ret;
c66ab6fa
JA
1349
1350 ret = splice_direct_to_actor(in, &sd, direct_splice_actor);
51a92c0f 1351 if (ret > 0)
a82c53a0 1352 *ppos = sd.pos;
51a92c0f 1353
c66ab6fa 1354 return ret;
b92ce558
JA
1355}
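/*
 * What this path backs from user space, as a hedged sketch: sendfile(2)
 * ends up in do_sendfile(), which calls do_splice_direct() above, so the
 * classic copy-free file-to-socket transfer is simply
 *
 *	#include <sys/sendfile.h>
 *
 *	off_t off = 0;
 *	ssize_t n = sendfile(sock_fd, file_fd, &off, count);
 *
 * with sock_fd/file_fd/count being whatever the application already has;
 * the internal, per-process pipe described above is invisible to the caller.
 */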
1356
7c77f0b3
MS
1357static int splice_pipe_to_pipe(struct pipe_inode_info *ipipe,
1358 struct pipe_inode_info *opipe,
1359 size_t len, unsigned int flags);
ddac0d39
JA
1360/*
1361 * After the inode slimming patch, i_pipe/i_bdev/i_cdev share the same
1362 * location, so checking ->i_pipe is not enough to verify that this is a
1363 * pipe.
1364 */
1365static inline struct pipe_inode_info *pipe_info(struct inode *inode)
1366{
1367 if (S_ISFIFO(inode->i_mode))
1368 return inode->i_pipe;
1369
1370 return NULL;
1371}
1372
83f9135b
JA
1373/*
1374 * Determine where to splice to/from.
1375 */
529565dc
IM
1376static long do_splice(struct file *in, loff_t __user *off_in,
1377 struct file *out, loff_t __user *off_out,
1378 size_t len, unsigned int flags)
5274f052 1379{
7c77f0b3
MS
1380 struct pipe_inode_info *ipipe;
1381 struct pipe_inode_info *opipe;
cbb7e577 1382 loff_t offset, *off;
a4514ebd 1383 long ret;
5274f052 1384
7c77f0b3
MS
1385 ipipe = pipe_info(in->f_path.dentry->d_inode);
1386 opipe = pipe_info(out->f_path.dentry->d_inode);
1387
1388 if (ipipe && opipe) {
1389 if (off_in || off_out)
1390 return -ESPIPE;
1391
1392 if (!(in->f_mode & FMODE_READ))
1393 return -EBADF;
1394
1395 if (!(out->f_mode & FMODE_WRITE))
1396 return -EBADF;
1397
1398 /* Splicing to self would be fun, but... */
1399 if (ipipe == opipe)
1400 return -EINVAL;
1401
1402 return splice_pipe_to_pipe(ipipe, opipe, len, flags);
1403 }
1404
1405 if (ipipe) {
529565dc
IM
1406 if (off_in)
1407 return -ESPIPE;
b92ce558
JA
1408 if (off_out) {
1409 if (out->f_op->llseek == no_llseek)
1410 return -EINVAL;
cbb7e577 1411 if (copy_from_user(&offset, off_out, sizeof(loff_t)))
b92ce558 1412 return -EFAULT;
cbb7e577
JA
1413 off = &offset;
1414 } else
1415 off = &out->f_pos;
529565dc 1416
7c77f0b3 1417 ret = do_splice_from(ipipe, out, off, len, flags);
a4514ebd
JA
1418
1419 if (off_out && copy_to_user(off_out, off, sizeof(loff_t)))
1420 ret = -EFAULT;
1421
1422 return ret;
529565dc 1423 }
5274f052 1424
7c77f0b3 1425 if (opipe) {
529565dc
IM
1426 if (off_out)
1427 return -ESPIPE;
b92ce558
JA
1428 if (off_in) {
1429 if (in->f_op->llseek == no_llseek)
1430 return -EINVAL;
cbb7e577 1431 if (copy_from_user(&offset, off_in, sizeof(loff_t)))
b92ce558 1432 return -EFAULT;
cbb7e577
JA
1433 off = &offset;
1434 } else
1435 off = &in->f_pos;
529565dc 1436
7c77f0b3 1437 ret = do_splice_to(in, off, opipe, len, flags);
a4514ebd
JA
1438
1439 if (off_in && copy_to_user(off_in, off, sizeof(loff_t)))
1440 ret = -EFAULT;
1441
1442 return ret;
529565dc 1443 }
5274f052
JA
1444
1445 return -EINVAL;
1446}
1447
912d35f8
JA
1448/*
1449 * Map an iov into an array of pages and offset/length tuples. With the
1450 * partial_page structure, we can map several non-contiguous ranges into
1451 * our one pages[] map instead of splitting that operation into pieces.
1452 * Could easily be exported as a generic helper for other users, in which
1453 * case one would probably want to add a 'max_nr_pages' parameter as well.
1454 */
1455static int get_iovec_page_array(const struct iovec __user *iov,
1456 unsigned int nr_vecs, struct page **pages,
7afa6fd0 1457 struct partial_page *partial, int aligned)
912d35f8
JA
1458{
1459 int buffers = 0, error = 0;
1460
912d35f8
JA
1461 while (nr_vecs) {
1462 unsigned long off, npages;
75723957 1463 struct iovec entry;
912d35f8
JA
1464 void __user *base;
1465 size_t len;
1466 int i;
1467
75723957 1468 error = -EFAULT;
bc40d73c 1469 if (copy_from_user(&entry, iov, sizeof(entry)))
912d35f8
JA
1470 break;
1471
75723957
LT
1472 base = entry.iov_base;
1473 len = entry.iov_len;
1474
912d35f8
JA
1475 /*
1476 * Sanity check this iovec. 0 read succeeds.
1477 */
75723957 1478 error = 0;
912d35f8
JA
1479 if (unlikely(!len))
1480 break;
1481 error = -EFAULT;
712a30e6 1482 if (!access_ok(VERIFY_READ, base, len))
912d35f8
JA
1483 break;
1484
1485 /*
1486 * Get this base offset and number of pages, then map
1487 * in the user pages.
1488 */
1489 off = (unsigned long) base & ~PAGE_MASK;
7afa6fd0
JA
1490
1491 /*
1492 * If asked for alignment, the offset must be zero and the
1493 * length a multiple of the PAGE_SIZE.
1494 */
1495 error = -EINVAL;
1496 if (aligned && (off || len & ~PAGE_MASK))
1497 break;
1498
912d35f8
JA
1499 npages = (off + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
1500 if (npages > PIPE_BUFFERS - buffers)
1501 npages = PIPE_BUFFERS - buffers;
1502
bc40d73c
NP
1503 error = get_user_pages_fast((unsigned long)base, npages,
1504 0, &pages[buffers]);
912d35f8
JA
1505
1506 if (unlikely(error <= 0))
1507 break;
1508
1509 /*
1510 * Fill this contiguous range into the partial page map.
1511 */
1512 for (i = 0; i < error; i++) {
7591489a 1513 const int plen = min_t(size_t, len, PAGE_SIZE - off);
912d35f8
JA
1514
1515 partial[buffers].offset = off;
1516 partial[buffers].len = plen;
1517
1518 off = 0;
1519 len -= plen;
1520 buffers++;
1521 }
1522
1523 /*
1524 * We didn't complete this iov; stop here since it probably
1525 * means we have to move some of this into a pipe to
1526 * be able to continue.
1527 */
1528 if (len)
1529 break;
1530
1531 /*
1532 * Don't continue if we mapped fewer pages than we asked for,
1533 * or if we mapped the max number of pages that we have
1534 * room for.
1535 */
1536 if (error < npages || buffers == PIPE_BUFFERS)
1537 break;
1538
1539 nr_vecs--;
1540 iov++;
1541 }
1542
912d35f8
JA
1543 if (buffers)
1544 return buffers;
1545
1546 return error;
1547}
1548
6a14b90b
JA
1549static int pipe_to_user(struct pipe_inode_info *pipe, struct pipe_buffer *buf,
1550 struct splice_desc *sd)
1551{
1552 char *src;
1553 int ret;
1554
cac36bb0 1555 ret = buf->ops->confirm(pipe, buf);
6a14b90b
JA
1556 if (unlikely(ret))
1557 return ret;
1558
1559 /*
1560 * See if we can use the atomic maps, by prefaulting in the
1561 * pages and doing an atomic copy
1562 */
1563 if (!fault_in_pages_writeable(sd->u.userptr, sd->len)) {
1564 src = buf->ops->map(pipe, buf, 1);
1565 ret = __copy_to_user_inatomic(sd->u.userptr, src + buf->offset,
1566 sd->len);
1567 buf->ops->unmap(pipe, buf, src);
1568 if (!ret) {
1569 ret = sd->len;
1570 goto out;
1571 }
1572 }
1573
1574 /*
1575 * No dice, use slow non-atomic map and copy
1576 */
1577 src = buf->ops->map(pipe, buf, 0);
1578
1579 ret = sd->len;
1580 if (copy_to_user(sd->u.userptr, src + buf->offset, sd->len))
1581 ret = -EFAULT;
1582
6866bef4 1583 buf->ops->unmap(pipe, buf, src);
6a14b90b
JA
1584out:
1585 if (ret > 0)
1586 sd->u.userptr += ret;
6a14b90b
JA
1587 return ret;
1588}
1589
1590/*
1591 * For lack of a better implementation, implement vmsplice() to userspace
1592 * as a simple copy of the pipes pages to the user iov.
1593 */
1594static long vmsplice_to_user(struct file *file, const struct iovec __user *iov,
1595 unsigned long nr_segs, unsigned int flags)
1596{
1597 struct pipe_inode_info *pipe;
1598 struct splice_desc sd;
1599 ssize_t size;
1600 int error;
1601 long ret;
1602
1603 pipe = pipe_info(file->f_path.dentry->d_inode);
1604 if (!pipe)
1605 return -EBADF;
1606
61e0d47c 1607 pipe_lock(pipe);
6a14b90b
JA
1608
1609 error = ret = 0;
1610 while (nr_segs) {
1611 void __user *base;
1612 size_t len;
1613
1614 /*
1615 * Get user address base and length for this iovec.
1616 */
1617 error = get_user(base, &iov->iov_base);
1618 if (unlikely(error))
1619 break;
1620 error = get_user(len, &iov->iov_len);
1621 if (unlikely(error))
1622 break;
1623
1624 /*
1625 * Sanity check this iovec. 0 read succeeds.
1626 */
1627 if (unlikely(!len))
1628 break;
1629 if (unlikely(!base)) {
1630 error = -EFAULT;
1631 break;
1632 }
1633
8811930d
JA
1634 if (unlikely(!access_ok(VERIFY_WRITE, base, len))) {
1635 error = -EFAULT;
1636 break;
1637 }
1638
6a14b90b
JA
1639 sd.len = 0;
1640 sd.total_len = len;
1641 sd.flags = flags;
1642 sd.u.userptr = base;
1643 sd.pos = 0;
1644
1645 size = __splice_from_pipe(pipe, &sd, pipe_to_user);
1646 if (size < 0) {
1647 if (!ret)
1648 ret = size;
1649
1650 break;
1651 }
1652
1653 ret += size;
1654
1655 if (size < len)
1656 break;
1657
1658 nr_segs--;
1659 iov++;
1660 }
1661
61e0d47c 1662 pipe_unlock(pipe);
6a14b90b
JA
1663
1664 if (!ret)
1665 ret = error;
1666
1667 return ret;
1668}
1669
912d35f8
JA
1670/*
1671 * vmsplice splices a user address range into a pipe. It can be thought of
1672 * as splice-from-memory, where the regular splice is splice-from-file (or
1673 * to file). In both cases the output is a pipe, naturally.
912d35f8 1674 */
6a14b90b
JA
1675static long vmsplice_to_pipe(struct file *file, const struct iovec __user *iov,
1676 unsigned long nr_segs, unsigned int flags)
912d35f8 1677{
ddac0d39 1678 struct pipe_inode_info *pipe;
912d35f8
JA
1679 struct page *pages[PIPE_BUFFERS];
1680 struct partial_page partial[PIPE_BUFFERS];
1681 struct splice_pipe_desc spd = {
1682 .pages = pages,
1683 .partial = partial,
1684 .flags = flags,
1685 .ops = &user_page_pipe_buf_ops,
bbdfc2f7 1686 .spd_release = spd_release_page,
912d35f8
JA
1687 };
1688
0f7fc9e4 1689 pipe = pipe_info(file->f_path.dentry->d_inode);
ddac0d39 1690 if (!pipe)
912d35f8 1691 return -EBADF;
912d35f8 1692
7afa6fd0
JA
1693 spd.nr_pages = get_iovec_page_array(iov, nr_segs, pages, partial,
1694 flags & SPLICE_F_GIFT);
912d35f8
JA
1695 if (spd.nr_pages <= 0)
1696 return spd.nr_pages;
1697
00522fb4 1698 return splice_to_pipe(pipe, &spd);
912d35f8
JA
1699}
1700
6a14b90b
JA
1701/*
1702 * Note that vmsplice only really supports true splicing _from_ user memory
1703 * to a pipe, not the other way around. Splicing from user memory is a simple
1704 * operation that can be supported without any funky alignment restrictions
1705 * or nasty vm tricks. We simply map in the user memory and fill it into
1706 * a pipe. The reverse isn't quite as easy, though. There are two possible
1707 * solutions for that:
1708 *
1709 * - memcpy() the data internally, at which point we might as well just
1710 * do a regular read() on the buffer anyway.
1711 * - Lots of nasty vm tricks, that are neither fast nor flexible (it
1712 * has restriction limitations on both ends of the pipe).
1713 *
1714 * Currently we punt and implement it as a normal copy, see pipe_to_user().
1715 *
1716 */
836f92ad
HC
1717SYSCALL_DEFINE4(vmsplice, int, fd, const struct iovec __user *, iov,
1718 unsigned long, nr_segs, unsigned int, flags)
912d35f8
JA
1719{
1720 struct file *file;
1721 long error;
1722 int fput;
1723
6a14b90b
JA
1724 if (unlikely(nr_segs > UIO_MAXIOV))
1725 return -EINVAL;
1726 else if (unlikely(!nr_segs))
1727 return 0;
1728
912d35f8
JA
1729 error = -EBADF;
1730 file = fget_light(fd, &fput);
1731 if (file) {
1732 if (file->f_mode & FMODE_WRITE)
6a14b90b
JA
1733 error = vmsplice_to_pipe(file, iov, nr_segs, flags);
1734 else if (file->f_mode & FMODE_READ)
1735 error = vmsplice_to_user(file, iov, nr_segs, flags);
912d35f8
JA
1736
1737 fput_light(file, fput);
1738 }
1739
1740 return error;
1741}
1742
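/*
 * A small user-space sketch of the supported direction (memory -> pipe),
 * matching the note above; buf and pfd are hypothetical and error handling
 * is omitted. With SPLICE_F_GIFT the buffer must be page-aligned and a
 * multiple of the page size, as enforced by get_iovec_page_array().
 *
 *	#define _GNU_SOURCE
 *	#include <fcntl.h>
 *	#include <sys/uio.h>
 *
 *	struct iovec iov = {
 *		.iov_base = buf,	// page-aligned, e.g. from mmap()
 *		.iov_len  = 4096,	// assuming a 4k page size
 *	};
 *	ssize_t n = vmsplice(pfd[1], &iov, 1, SPLICE_F_GIFT);
 */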
836f92ad
HC
1743SYSCALL_DEFINE6(splice, int, fd_in, loff_t __user *, off_in,
1744 int, fd_out, loff_t __user *, off_out,
1745 size_t, len, unsigned int, flags)
5274f052
JA
1746{
1747 long error;
1748 struct file *in, *out;
1749 int fput_in, fput_out;
1750
1751 if (unlikely(!len))
1752 return 0;
1753
1754 error = -EBADF;
529565dc 1755 in = fget_light(fd_in, &fput_in);
5274f052
JA
1756 if (in) {
1757 if (in->f_mode & FMODE_READ) {
529565dc 1758 out = fget_light(fd_out, &fput_out);
5274f052
JA
1759 if (out) {
1760 if (out->f_mode & FMODE_WRITE)
529565dc
IM
1761 error = do_splice(in, off_in,
1762 out, off_out,
1763 len, flags);
5274f052
JA
1764 fput_light(out, fput_out);
1765 }
1766 }
1767
1768 fput_light(in, fput_in);
1769 }
1770
1771 return error;
1772}
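/*
 * One detail of the fd/offset handling above that is easy to miss: when a
 * non-NULL offset pointer is passed for the file side, do_splice() works on
 * a private copy and writes the updated value back, leaving the file's own
 * f_pos untouched. A hedged user-space fragment (file_fd/pfd hypothetical):
 *
 *	loff_t off = 4096;	// read starting at this file offset...
 *	ssize_t n = splice(file_fd, &off, pfd[1], NULL, 65536, 0);
 *	// ...on success 'off' has advanced by n, file_fd's position has not.
 */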
70524490 1773
aadd06e5
JA
1774/*
1775 * Make sure there's data to read. Wait for input if we can, otherwise
1776 * return an appropriate error.
1777 */
7c77f0b3 1778static int ipipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
aadd06e5
JA
1779{
1780 int ret;
1781
1782 /*
1783 * Check ->nrbufs without the pipe lock first. This function
1784 * is speculative anyway, so missing one is ok.
1785 */
1786 if (pipe->nrbufs)
1787 return 0;
1788
1789 ret = 0;
61e0d47c 1790 pipe_lock(pipe);
aadd06e5
JA
1791
1792 while (!pipe->nrbufs) {
1793 if (signal_pending(current)) {
1794 ret = -ERESTARTSYS;
1795 break;
1796 }
1797 if (!pipe->writers)
1798 break;
1799 if (!pipe->waiting_writers) {
1800 if (flags & SPLICE_F_NONBLOCK) {
1801 ret = -EAGAIN;
1802 break;
1803 }
1804 }
1805 pipe_wait(pipe);
1806 }
1807
61e0d47c 1808 pipe_unlock(pipe);
aadd06e5
JA
1809 return ret;
1810}
1811
1812/*
1813 * Make sure there's writeable room. Wait for room if we can, otherwise
1814 * return an appropriate error.
1815 */
7c77f0b3 1816static int opipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
aadd06e5
JA
1817{
1818 int ret;
1819
1820 /*
1821 * Check ->nrbufs without the pipe lock first. This function
1822 * is speculative anyway, so missing one is ok.
1823 */
1824 if (pipe->nrbufs < PIPE_BUFFERS)
1825 return 0;
1826
1827 ret = 0;
61e0d47c 1828 pipe_lock(pipe);
aadd06e5
JA
1829
1830 while (pipe->nrbufs >= PIPE_BUFFERS) {
1831 if (!pipe->readers) {
1832 send_sig(SIGPIPE, current, 0);
1833 ret = -EPIPE;
1834 break;
1835 }
1836 if (flags & SPLICE_F_NONBLOCK) {
1837 ret = -EAGAIN;
1838 break;
1839 }
1840 if (signal_pending(current)) {
1841 ret = -ERESTARTSYS;
1842 break;
1843 }
1844 pipe->waiting_writers++;
1845 pipe_wait(pipe);
1846 pipe->waiting_writers--;
1847 }
1848
61e0d47c 1849 pipe_unlock(pipe);
aadd06e5
JA
1850 return ret;
1851}
1852
7c77f0b3
MS
1853/*
1854 * Splice contents of ipipe to opipe.
1855 */
1856static int splice_pipe_to_pipe(struct pipe_inode_info *ipipe,
1857 struct pipe_inode_info *opipe,
1858 size_t len, unsigned int flags)
1859{
1860 struct pipe_buffer *ibuf, *obuf;
1861 int ret = 0, nbuf;
1862 bool input_wakeup = false;
1863
1864
1865retry:
1866 ret = ipipe_prep(ipipe, flags);
1867 if (ret)
1868 return ret;
1869
1870 ret = opipe_prep(opipe, flags);
1871 if (ret)
1872 return ret;
1873
1874 /*
1875 * Potential ABBA deadlock, work around it by ordering lock
1876 * grabbing by pipe info address. Otherwise two different processes
1877 * could deadlock (one doing tee from A -> B, the other from B -> A).
1878 */
1879 pipe_double_lock(ipipe, opipe);
1880
1881 do {
1882 if (!opipe->readers) {
1883 send_sig(SIGPIPE, current, 0);
1884 if (!ret)
1885 ret = -EPIPE;
1886 break;
1887 }
1888
1889 if (!ipipe->nrbufs && !ipipe->writers)
1890 break;
1891
1892 /*
1893 * Cannot make any progress, because either the input
1894 * pipe is empty or the output pipe is full.
1895 */
1896 if (!ipipe->nrbufs || opipe->nrbufs >= PIPE_BUFFERS) {
1897 /* Already processed some buffers, break */
1898 if (ret)
1899 break;
1900
1901 if (flags & SPLICE_F_NONBLOCK) {
1902 ret = -EAGAIN;
1903 break;
1904 }
1905
1906 /*
1907 * We raced with another reader/writer and haven't
1908 * managed to process any buffers. A zero return
1909 * value means EOF, so retry instead.
1910 */
1911 pipe_unlock(ipipe);
1912 pipe_unlock(opipe);
1913 goto retry;
1914 }
1915
1916 ibuf = ipipe->bufs + ipipe->curbuf;
1917 nbuf = (opipe->curbuf + opipe->nrbufs) % PIPE_BUFFERS;
1918 obuf = opipe->bufs + nbuf;
1919
1920 if (len >= ibuf->len) {
1921 /*
1922 * Simply move the whole buffer from ipipe to opipe
1923 */
1924 *obuf = *ibuf;
1925 ibuf->ops = NULL;
1926 opipe->nrbufs++;
1927 ipipe->curbuf = (ipipe->curbuf + 1) % PIPE_BUFFERS;
1928 ipipe->nrbufs--;
1929 input_wakeup = true;
1930 } else {
1931 /*
1932 * Get a reference to this pipe buffer,
1933 * so we can copy the contents over.
1934 */
1935 ibuf->ops->get(ipipe, ibuf);
1936 *obuf = *ibuf;
1937
1938 /*
1939 * Don't inherit the gift flag, we need to
1940 * prevent multiple steals of this page.
1941 */
1942 obuf->flags &= ~PIPE_BUF_FLAG_GIFT;
1943
1944 obuf->len = len;
1945 opipe->nrbufs++;
1946 ibuf->offset += obuf->len;
1947 ibuf->len -= obuf->len;
1948 }
1949 ret += obuf->len;
1950 len -= obuf->len;
1951 } while (len);
1952
1953 pipe_unlock(ipipe);
1954 pipe_unlock(opipe);
1955
1956 /*
1957 * If we put data in the output pipe, wakeup any potential readers.
1958 */
1959 if (ret > 0) {
1960 smp_mb();
1961 if (waitqueue_active(&opipe->wait))
1962 wake_up_interruptible(&opipe->wait);
1963 kill_fasync(&opipe->fasync_readers, SIGIO, POLL_IN);
1964 }
1965 if (input_wakeup)
1966 wakeup_pipe_writers(ipipe);
1967
1968 return ret;
1969}
1970
70524490
JA
1971/*
1972 * Link contents of ipipe to opipe.
1973 */
1974static int link_pipe(struct pipe_inode_info *ipipe,
1975 struct pipe_inode_info *opipe,
1976 size_t len, unsigned int flags)
1977{
1978 struct pipe_buffer *ibuf, *obuf;
aadd06e5 1979 int ret = 0, i = 0, nbuf;
70524490
JA
1980
1981 /*
1982 * Potential ABBA deadlock, work around it by ordering lock
61e0d47c 1983 * grabbing by pipe info address. Otherwise two different processes
70524490
JA
1984 * could deadlock (one doing tee from A -> B, the other from B -> A).
1985 */
61e0d47c 1986 pipe_double_lock(ipipe, opipe);
70524490 1987
aadd06e5 1988 do {
70524490
JA
1989 if (!opipe->readers) {
1990 send_sig(SIGPIPE, current, 0);
1991 if (!ret)
1992 ret = -EPIPE;
1993 break;
1994 }
70524490 1995
aadd06e5
JA
1996 /*
1997 * If we have iterated all input buffers or ran out of
1998 * output room, break.
1999 */
2000 if (i >= ipipe->nrbufs || opipe->nrbufs >= PIPE_BUFFERS)
2001 break;
70524490 2002
aadd06e5
JA
2003 ibuf = ipipe->bufs + ((ipipe->curbuf + i) & (PIPE_BUFFERS - 1));
2004 nbuf = (opipe->curbuf + opipe->nrbufs) & (PIPE_BUFFERS - 1);
70524490
JA
2005
2006 /*
aadd06e5
JA
2007 * Get a reference to this pipe buffer,
2008 * so we can copy the contents over.
70524490 2009 */
aadd06e5
JA
2010 ibuf->ops->get(ipipe, ibuf);
2011
2012 obuf = opipe->bufs + nbuf;
2013 *obuf = *ibuf;
2014
2a27250e 2015 /*
aadd06e5
JA
2016 * Don't inherit the gift flag, we need to
2017 * prevent multiple steals of this page.
2a27250e 2018 */
aadd06e5 2019 obuf->flags &= ~PIPE_BUF_FLAG_GIFT;
70524490 2020
aadd06e5
JA
2021 if (obuf->len > len)
2022 obuf->len = len;
70524490 2023
aadd06e5
JA
2024 opipe->nrbufs++;
2025 ret += obuf->len;
2026 len -= obuf->len;
2027 i++;
2028 } while (len);
70524490 2029
02cf01ae
JA
2030 /*
2031 * return EAGAIN if we have the potential of some data in the
2032 * future, otherwise just return 0
2033 */
2034 if (!ret && ipipe->waiting_writers && (flags & SPLICE_F_NONBLOCK))
2035 ret = -EAGAIN;
2036
61e0d47c
MS
2037 pipe_unlock(ipipe);
2038 pipe_unlock(opipe);
70524490 2039
aadd06e5
JA
2040 /*
2041 * If we put data in the output pipe, wakeup any potential readers.
2042 */
2043 if (ret > 0) {
70524490
JA
2044 smp_mb();
2045 if (waitqueue_active(&opipe->wait))
2046 wake_up_interruptible(&opipe->wait);
2047 kill_fasync(&opipe->fasync_readers, SIGIO, POLL_IN);
2048 }
2049
2050 return ret;
2051}
2052
2053/*
2054 * This is a tee(1) implementation that works on pipes. It doesn't copy
2055 * any data, it simply references the 'in' pages on the 'out' pipe.
2056 * The 'flags' used are the SPLICE_F_* variants, currently the only
2057 * applicable one is SPLICE_F_NONBLOCK.
2058 */
2059static long do_tee(struct file *in, struct file *out, size_t len,
2060 unsigned int flags)
2061{
0f7fc9e4
JJS
2062 struct pipe_inode_info *ipipe = pipe_info(in->f_path.dentry->d_inode);
2063 struct pipe_inode_info *opipe = pipe_info(out->f_path.dentry->d_inode);
aadd06e5 2064 int ret = -EINVAL;
70524490
JA
2065
2066 /*
aadd06e5
JA
2067 * Duplicate the contents of ipipe to opipe without actually
2068 * copying the data.
70524490 2069 */
aadd06e5
JA
2070 if (ipipe && opipe && ipipe != opipe) {
2071 /*
2072 * Keep going, unless we encounter an error. The ipipe/opipe
2073 * ordering doesn't really matter.
2074 */
7c77f0b3 2075 ret = ipipe_prep(ipipe, flags);
aadd06e5 2076 if (!ret) {
7c77f0b3 2077 ret = opipe_prep(opipe, flags);
02cf01ae 2078 if (!ret)
aadd06e5 2079 ret = link_pipe(ipipe, opipe, len, flags);
aadd06e5
JA
2080 }
2081 }
70524490 2082
aadd06e5 2083 return ret;
70524490
JA
2084}
2085
836f92ad 2086SYSCALL_DEFINE4(tee, int, fdin, int, fdout, size_t, len, unsigned int, flags)
70524490
JA
2087{
2088 struct file *in;
2089 int error, fput_in;
2090
2091 if (unlikely(!len))
2092 return 0;
2093
2094 error = -EBADF;
2095 in = fget_light(fdin, &fput_in);
2096 if (in) {
2097 if (in->f_mode & FMODE_READ) {
2098 int fput_out;
2099 struct file *out = fget_light(fdout, &fput_out);
2100
2101 if (out) {
2102 if (out->f_mode & FMODE_WRITE)
2103 error = do_tee(in, out, len, flags);
2104 fput_light(out, fput_out);
2105 }
2106 }
2107 fput_light(in, fput_in);
2108 }
2109
2110 return error;
2111}
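/*
 * A user-space sketch of the tee(2) call implemented above, in the spirit
 * of tee(1): duplicate whatever is sitting in the pipe on stdin into a
 * second pipe without consuming it, then drain stdin to a file with
 * splice(). The descriptors (out_pipe_fd, file_fd) are hypothetical and
 * error handling is omitted.
 *
 *	#define _GNU_SOURCE
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *
 *	ssize_t n = tee(STDIN_FILENO, out_pipe_fd, 65536, SPLICE_F_NONBLOCK);
 *	if (n > 0)
 *		// the same data is still in stdin's pipe; now consume it
 *		splice(STDIN_FILENO, NULL, file_fd, NULL, n, SPLICE_F_MOVE);
 */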