blob: 9f796b1034d1328189f9a308955cac9338df2abf [file] [log] [blame]
Jens Axboe5274f052006-03-30 15:15:30 +02001/*
2 * "splice": joining two ropes together by interweaving their strands.
3 *
4 * This is the "extended pipe" functionality, where a pipe is used as
5 * an arbitrary in-memory buffer. Think of a pipe as a small kernel
6 * buffer that you can use to transfer data from one end to the other.
7 *
8 * The traditional unix read/write is extended with a "splice()" operation
9 * that transfers data buffers to or from a pipe buffer.
10 *
11 * Named by Larry McVoy, original implementation from Linus, extended by
12 * Jens to support splicing to files and fixing the initial implementation
13 * bugs.
14 *
15 * Copyright (C) 2005 Jens Axboe <axboe@suse.de>
16 * Copyright (C) 2005 Linus Torvalds <torvalds@osdl.org>
17 *
18 */
19#include <linux/fs.h>
20#include <linux/file.h>
21#include <linux/pagemap.h>
22#include <linux/pipe_fs_i.h>
23#include <linux/mm_inline.h>
Jens Axboe5abc97a2006-03-30 15:16:46 +020024#include <linux/swap.h>
Jens Axboe4f6f0bd2006-04-02 23:04:46 +020025#include <linux/writeback.h>
26#include <linux/buffer_head.h>
Jeff Garzika0f06782006-03-30 23:06:13 -050027#include <linux/module.h>
Jens Axboe4f6f0bd2006-04-02 23:04:46 +020028#include <linux/syscalls.h>
Jens Axboe5274f052006-03-30 15:15:30 +020029
30/*
31 * Passed to the actors
32 */
struct splice_desc {
	unsigned int len, total_len;	/* current chunk and remaining length */
	unsigned int flags;		/* splice modifier flags (SPLICE_F_*) */
	struct file *file;		/* file to read/write */
	loff_t pos;			/* file position */
};
39
Jens Axboe83f91352006-04-02 23:05:09 +020040/*
41 * Attempt to steal a page from a pipe buffer. This should perhaps go into
42 * a vm helper function, it's already simplified quite a bit by the
43 * addition of remove_mapping(). If success is returned, the caller may
44 * attempt to reuse this page for another destination.
45 */
static int page_cache_pipe_buf_steal(struct pipe_inode_info *info,
				     struct pipe_buffer *buf)
{
	struct page *page = buf->page;
	struct address_space *mapping = page_mapping(page);

	/* ->map() left the page locked and uptodate for us */
	WARN_ON(!PageLocked(page));
	WARN_ON(!PageUptodate(page));

	/*
	 * At least for ext2 with nobh option, we need to wait on writeback
	 * completing on this page, since we'll remove it from the pagecache.
	 * Otherwise truncate won't wait on the page, allowing the disk
	 * blocks to be reused by someone else before we actually wrote our
	 * data to them. fs corruption ensues.
	 */
	wait_on_page_writeback(page);

	/* drop fs-private state (e.g. buffer heads) before unmapping */
	if (PagePrivate(page))
		try_to_release_page(page, mapping_gfp_mask(mapping));

	/* someone else still references the page: cannot steal it */
	if (!remove_mapping(mapping, page))
		return 1;

	/*
	 * Success: mark the buffer as stolen.  The page is no longer in the
	 * page cache but is still on the LRU, so also flag that — the
	 * receiving side must not add it to the LRU a second time.
	 */
	buf->flags |= PIPE_BUF_FLAG_STOLEN | PIPE_BUF_FLAG_LRU;
	return 0;
}
73
/*
 * Drop the pipe's reference to the page and clear per-buffer state.
 */
static void page_cache_pipe_buf_release(struct pipe_inode_info *info,
					struct pipe_buffer *buf)
{
	page_cache_release(buf->page);
	buf->page = NULL;
	buf->flags &= ~(PIPE_BUF_FLAG_STOLEN | PIPE_BUF_FLAG_LRU);
}
81
/*
 * Map a page-cache backed pipe buffer for access by an actor.  Takes the
 * page lock (released again by ->unmap()) so the page cannot be truncated
 * or invalidated while the caller uses the mapping.  Returns a kmap'ed
 * address, or an ERR_PTR on failure.
 */
static void *page_cache_pipe_buf_map(struct file *file,
				     struct pipe_inode_info *info,
				     struct pipe_buffer *buf)
{
	struct page *page = buf->page;

	lock_page(page);

	/* read never completed successfully: no valid data in the page */
	if (!PageUptodate(page)) {
		unlock_page(page);
		return ERR_PTR(-EIO);
	}

	/* page was truncated from the file meanwhile */
	if (!page->mapping) {
		unlock_page(page);
		return ERR_PTR(-ENODATA);
	}

	return kmap(buf->page);
}
102
/*
 * Undo ->map(): release the page lock and the kernel mapping.
 */
static void page_cache_pipe_buf_unmap(struct pipe_inode_info *info,
				      struct pipe_buffer *buf)
{
	unlock_page(buf->page);
	kunmap(buf->page);
}
109
/* buffer operations for pipe buffers that reference page cache pages */
static struct pipe_buf_operations page_cache_pipe_buf_ops = {
	.can_merge = 0,		/* each page is a separate buffer; no merging */
	.map = page_cache_pipe_buf_map,
	.unmap = page_cache_pipe_buf_unmap,
	.release = page_cache_pipe_buf_release,
	.steal = page_cache_pipe_buf_steal,
};
117
Jens Axboe83f91352006-04-02 23:05:09 +0200118/*
119 * Pipe output worker. This sets up our pipe format with the page cache
120 * pipe buffer operations. Otherwise very similar to the regular pipe_writev().
121 */
Jens Axboe5274f052006-03-30 15:15:30 +0200122static ssize_t move_to_pipe(struct inode *inode, struct page **pages,
123 int nr_pages, unsigned long offset,
Linus Torvalds29e35092006-04-02 12:46:35 -0700124 unsigned long len, unsigned int flags)
Jens Axboe5274f052006-03-30 15:15:30 +0200125{
126 struct pipe_inode_info *info;
127 int ret, do_wakeup, i;
128
129 ret = 0;
130 do_wakeup = 0;
131 i = 0;
132
133 mutex_lock(PIPE_MUTEX(*inode));
134
135 info = inode->i_pipe;
136 for (;;) {
137 int bufs;
138
139 if (!PIPE_READERS(*inode)) {
140 send_sig(SIGPIPE, current, 0);
141 if (!ret)
142 ret = -EPIPE;
143 break;
144 }
145
146 bufs = info->nrbufs;
147 if (bufs < PIPE_BUFFERS) {
148 int newbuf = (info->curbuf + bufs) & (PIPE_BUFFERS - 1);
149 struct pipe_buffer *buf = info->bufs + newbuf;
150 struct page *page = pages[i++];
151 unsigned long this_len;
152
153 this_len = PAGE_CACHE_SIZE - offset;
154 if (this_len > len)
155 this_len = len;
156
157 buf->page = page;
158 buf->offset = offset;
159 buf->len = this_len;
160 buf->ops = &page_cache_pipe_buf_ops;
161 info->nrbufs = ++bufs;
162 do_wakeup = 1;
163
164 ret += this_len;
165 len -= this_len;
166 offset = 0;
167 if (!--nr_pages)
168 break;
169 if (!len)
170 break;
171 if (bufs < PIPE_BUFFERS)
172 continue;
173
174 break;
175 }
176
Linus Torvalds29e35092006-04-02 12:46:35 -0700177 if (flags & SPLICE_F_NONBLOCK) {
178 if (!ret)
179 ret = -EAGAIN;
180 break;
181 }
182
Jens Axboe5274f052006-03-30 15:15:30 +0200183 if (signal_pending(current)) {
184 if (!ret)
185 ret = -ERESTARTSYS;
186 break;
187 }
188
189 if (do_wakeup) {
Jens Axboec0bd1f62006-04-10 09:03:32 +0200190 smp_mb();
191 if (waitqueue_active(PIPE_WAIT(*inode)))
192 wake_up_interruptible_sync(PIPE_WAIT(*inode));
Jens Axboe5274f052006-03-30 15:15:30 +0200193 kill_fasync(PIPE_FASYNC_READERS(*inode), SIGIO,
194 POLL_IN);
195 do_wakeup = 0;
196 }
197
198 PIPE_WAITING_WRITERS(*inode)++;
199 pipe_wait(inode);
200 PIPE_WAITING_WRITERS(*inode)--;
201 }
202
203 mutex_unlock(PIPE_MUTEX(*inode));
204
205 if (do_wakeup) {
Jens Axboec0bd1f62006-04-10 09:03:32 +0200206 smp_mb();
207 if (waitqueue_active(PIPE_WAIT(*inode)))
208 wake_up_interruptible(PIPE_WAIT(*inode));
Jens Axboe5274f052006-03-30 15:15:30 +0200209 kill_fasync(PIPE_FASYNC_READERS(*inode), SIGIO, POLL_IN);
210 }
211
212 while (i < nr_pages)
213 page_cache_release(pages[i++]);
214
215 return ret;
216}
217
/*
 * Gather up to PIPE_BUFFERS page-cache pages covering [f_pos, f_pos+len)
 * of @in, starting read-ahead/read-in as needed, and splice them into
 * @pipe via move_to_pipe().  Returns bytes queued, 0, or negative error.
 */
static int __generic_file_splice_read(struct file *in, struct inode *pipe,
				      size_t len, unsigned int flags)
{
	struct address_space *mapping = in->f_mapping;
	unsigned int offset, nr_pages;
	struct page *pages[PIPE_BUFFERS], *shadow[PIPE_BUFFERS];
	struct page *page;
	pgoff_t index, pidx;
	int i, j;

	/* split f_pos into a page index and an offset within that page */
	index = in->f_pos >> PAGE_CACHE_SHIFT;
	offset = in->f_pos & ~PAGE_CACHE_MASK;
	nr_pages = (len + offset + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;

	/* one pipe buffer per page, so cap at the pipe's capacity */
	if (nr_pages > PIPE_BUFFERS)
		nr_pages = PIPE_BUFFERS;

	/*
	 * initiate read-ahead on this page range
	 */
	do_page_cache_readahead(mapping, in, index, nr_pages);

	/*
	 * Get as many pages from the page cache as possible..
	 * Start IO on the page cache entries we create (we
	 * can assume that any pre-existing ones we find have
	 * already had IO started on them).
	 */
	i = find_get_pages(mapping, index, nr_pages, pages);

	/*
	 * common case - we found all pages and they are contiguous,
	 * kick them off
	 */
	if (i && (pages[i - 1]->index == index + i - 1))
		goto splice_them;

	/*
	 * fill shadow[] with pages at the right locations, so we only
	 * have to fill holes
	 */
	memset(shadow, 0, nr_pages * sizeof(struct page *));
	for (j = 0; j < i; j++)
		shadow[pages[j]->index - index] = pages[j];

	/*
	 * now fill in the holes
	 */
	for (i = 0, pidx = index; i < nr_pages; pidx++, i++) {
		int error;

		if (shadow[i])
			continue;

		/*
		 * no page there, look one up / create it
		 */
		page = find_or_create_page(mapping, pidx,
					   mapping_gfp_mask(mapping));
		if (!page)
			break;

		if (PageUptodate(page))
			unlock_page(page);
		else {
			error = mapping->a_ops->readpage(in, page);

			if (unlikely(error)) {
				page_cache_release(page);
				break;
			}
		}
		shadow[i] = page;
	}

	if (!i) {
		/* couldn't get even the first page: drop everything found */
		for (i = 0; i < nr_pages; i++) {
			if (shadow[i])
				page_cache_release(shadow[i]);
		}
		return 0;
	}

	/*
	 * NOTE(review): only the first i entries are spliced below.  When
	 * the hole-filling loop stops early (i > 0), shadow[] entries
	 * beyond i that were found by find_get_pages() appear to keep
	 * their references — verify this is not a page reference leak.
	 */
	memcpy(pages, shadow, i * sizeof(struct page *));

	/*
	 * Now we splice them into the pipe..
	 */
splice_them:
	return move_to_pipe(pipe, pages, i, offset, len, flags);
}
309
Jens Axboe83f91352006-04-02 23:05:09 +0200310/**
311 * generic_file_splice_read - splice data from file to a pipe
312 * @in: file to splice from
313 * @pipe: pipe to splice to
314 * @len: number of bytes to splice
315 * @flags: splice modifier flags
316 *
317 * Will read pages from given file and fill them into a pipe.
318 *
319 */
Jens Axboe5274f052006-03-30 15:15:30 +0200320ssize_t generic_file_splice_read(struct file *in, struct inode *pipe,
321 size_t len, unsigned int flags)
322{
323 ssize_t spliced;
324 int ret;
325
326 ret = 0;
327 spliced = 0;
328 while (len) {
Linus Torvalds29e35092006-04-02 12:46:35 -0700329 ret = __generic_file_splice_read(in, pipe, len, flags);
Jens Axboe5274f052006-03-30 15:15:30 +0200330
331 if (ret <= 0)
332 break;
333
334 in->f_pos += ret;
335 len -= ret;
336 spliced += ret;
Linus Torvalds29e35092006-04-02 12:46:35 -0700337
338 if (!(flags & SPLICE_F_NONBLOCK))
339 continue;
340 ret = -EAGAIN;
341 break;
Jens Axboe5274f052006-03-30 15:15:30 +0200342 }
343
344 if (spliced)
345 return spliced;
346
347 return ret;
348}
349
Jens Axboe059a8f32006-04-02 23:06:05 +0200350EXPORT_SYMBOL(generic_file_splice_read);
351
Jens Axboe5274f052006-03-30 15:15:30 +0200352/*
Jens Axboe4f6f0bd2006-04-02 23:04:46 +0200353 * Send 'sd->len' bytes to socket from 'sd->file' at position 'sd->pos'
354 * using sendpage().
Jens Axboe5274f052006-03-30 15:15:30 +0200355 */
static int pipe_to_sendpage(struct pipe_inode_info *info,
			    struct pipe_buffer *buf, struct splice_desc *sd)
{
	struct file *file = sd->file;
	loff_t pos = sd->pos;
	unsigned int offset;
	ssize_t ret;
	void *ptr;
	int more;

	/*
	 * sub-optimal, but we are limited by the pipe ->map. we don't
	 * need a kmap'ed buffer here, we just want to make sure we
	 * have the page pinned if the pipe page originates from the
	 * page cache
	 */
	ptr = buf->ops->map(file, info, buf);
	if (IS_ERR(ptr))
		return PTR_ERR(ptr);

	offset = pos & ~PAGE_CACHE_MASK;
	/* hint "more data follows" unless this is the final chunk */
	more = (sd->flags & SPLICE_F_MORE) || sd->len < sd->total_len;

	ret = file->f_op->sendpage(file, buf->page, offset, sd->len, &pos, more);

	buf->ops->unmap(info, buf);
	/* anything other than a complete send is treated as an error */
	if (ret == sd->len)
		return 0;

	return -EIO;
}
387
388/*
389 * This is a little more tricky than the file -> pipe splicing. There are
390 * basically three cases:
391 *
392 * - Destination page already exists in the address space and there
 393 * are users of it. For that case we have no other option than
394 * copying the data. Tough luck.
395 * - Destination page already exists in the address space, but there
396 * are no users of it. Make sure it's uptodate, then drop it. Fall
397 * through to last case.
398 * - Destination page does not exist, we can add the pipe page to
399 * the page cache and avoid the copy.
400 *
Jens Axboe83f91352006-04-02 23:05:09 +0200401 * If asked to move pages to the output file (SPLICE_F_MOVE is set in
402 * sd->flags), we attempt to migrate pages from the pipe to the output
403 * file address space page cache. This is possible if no one else has
404 * the pipe page referenced outside of the pipe and page cache. If
405 * SPLICE_F_MOVE isn't set, or we cannot move the page, we simply create
406 * a new page in the output file page cache and fill/dirty that.
Jens Axboe5274f052006-03-30 15:15:30 +0200407 */
408static int pipe_to_file(struct pipe_inode_info *info, struct pipe_buffer *buf,
409 struct splice_desc *sd)
410{
411 struct file *file = sd->file;
412 struct address_space *mapping = file->f_mapping;
Jens Axboe3e7ee3e2006-04-02 23:11:04 +0200413 gfp_t gfp_mask = mapping_gfp_mask(mapping);
Jens Axboe5274f052006-03-30 15:15:30 +0200414 unsigned int offset;
415 struct page *page;
Jens Axboe5274f052006-03-30 15:15:30 +0200416 pgoff_t index;
Jens Axboe5abc97a2006-03-30 15:16:46 +0200417 char *src;
Jens Axboe3e7ee3e2006-04-02 23:11:04 +0200418 int ret;
Jens Axboe5274f052006-03-30 15:15:30 +0200419
420 /*
421 * after this, page will be locked and unmapped
422 */
423 src = buf->ops->map(file, info, buf);
424 if (IS_ERR(src))
425 return PTR_ERR(src);
426
427 index = sd->pos >> PAGE_CACHE_SHIFT;
428 offset = sd->pos & ~PAGE_CACHE_MASK;
429
Jens Axboe5274f052006-03-30 15:15:30 +0200430 /*
Jens Axboe5abc97a2006-03-30 15:16:46 +0200431 * reuse buf page, if SPLICE_F_MOVE is set
Jens Axboe5274f052006-03-30 15:15:30 +0200432 */
Jens Axboe5abc97a2006-03-30 15:16:46 +0200433 if (sd->flags & SPLICE_F_MOVE) {
Jens Axboe83f91352006-04-02 23:05:09 +0200434 /*
435 * If steal succeeds, buf->page is now pruned from the vm
436 * side (LRU and page cache) and we can reuse it.
437 */
Jens Axboe5abc97a2006-03-30 15:16:46 +0200438 if (buf->ops->steal(info, buf))
439 goto find_page;
Jens Axboe5274f052006-03-30 15:15:30 +0200440
Jens Axboe5abc97a2006-03-30 15:16:46 +0200441 page = buf->page;
Jens Axboe3e7ee3e2006-04-02 23:11:04 +0200442 if (add_to_page_cache(page, mapping, index, gfp_mask))
Jens Axboe5abc97a2006-03-30 15:16:46 +0200443 goto find_page;
Jens Axboe3e7ee3e2006-04-02 23:11:04 +0200444
445 if (!(buf->flags & PIPE_BUF_FLAG_LRU))
446 lru_cache_add(page);
Jens Axboe5abc97a2006-03-30 15:16:46 +0200447 } else {
448find_page:
449 ret = -ENOMEM;
Jens Axboe3e7ee3e2006-04-02 23:11:04 +0200450 page = find_or_create_page(mapping, index, gfp_mask);
Jens Axboe5abc97a2006-03-30 15:16:46 +0200451 if (!page)
Dave Jones9aefe432006-04-10 09:02:40 +0200452 goto out_nomem;
Jens Axboe5274f052006-03-30 15:15:30 +0200453
Jens Axboe5abc97a2006-03-30 15:16:46 +0200454 /*
455 * If the page is uptodate, it is also locked. If it isn't
456 * uptodate, we can mark it uptodate if we are filling the
457 * full page. Otherwise we need to read it in first...
458 */
459 if (!PageUptodate(page)) {
460 if (sd->len < PAGE_CACHE_SIZE) {
461 ret = mapping->a_ops->readpage(file, page);
462 if (unlikely(ret))
463 goto out;
464
465 lock_page(page);
466
467 if (!PageUptodate(page)) {
468 /*
469 * page got invalidated, repeat
470 */
471 if (!page->mapping) {
472 unlock_page(page);
473 page_cache_release(page);
474 goto find_page;
475 }
476 ret = -EIO;
477 goto out;
Jens Axboe5274f052006-03-30 15:15:30 +0200478 }
Jens Axboe5abc97a2006-03-30 15:16:46 +0200479 } else {
480 WARN_ON(!PageLocked(page));
481 SetPageUptodate(page);
Jens Axboe5274f052006-03-30 15:15:30 +0200482 }
Jens Axboe5274f052006-03-30 15:15:30 +0200483 }
484 }
485
486 ret = mapping->a_ops->prepare_write(file, page, 0, sd->len);
Jens Axboe4f6f0bd2006-04-02 23:04:46 +0200487 if (ret == AOP_TRUNCATED_PAGE) {
488 page_cache_release(page);
489 goto find_page;
490 } else if (ret)
Jens Axboe5274f052006-03-30 15:15:30 +0200491 goto out;
492
Jens Axboe3e7ee3e2006-04-02 23:11:04 +0200493 if (!(buf->flags & PIPE_BUF_FLAG_STOLEN)) {
Jens Axboe5abc97a2006-03-30 15:16:46 +0200494 char *dst = kmap_atomic(page, KM_USER0);
495
496 memcpy(dst + offset, src + buf->offset, sd->len);
497 flush_dcache_page(page);
498 kunmap_atomic(dst, KM_USER0);
499 }
Jens Axboe5274f052006-03-30 15:15:30 +0200500
501 ret = mapping->a_ops->commit_write(file, page, 0, sd->len);
Jens Axboe4f6f0bd2006-04-02 23:04:46 +0200502 if (ret == AOP_TRUNCATED_PAGE) {
503 page_cache_release(page);
504 goto find_page;
505 } else if (ret)
Jens Axboe5274f052006-03-30 15:15:30 +0200506 goto out;
507
Jens Axboec7f21e42006-04-10 09:01:01 +0200508 mark_page_accessed(page);
Jens Axboe4f6f0bd2006-04-02 23:04:46 +0200509 balance_dirty_pages_ratelimited(mapping);
Jens Axboe5274f052006-03-30 15:15:30 +0200510out:
Jens Axboe3e7ee3e2006-04-02 23:11:04 +0200511 if (!(buf->flags & PIPE_BUF_FLAG_STOLEN)) {
Jens Axboe5abc97a2006-03-30 15:16:46 +0200512 page_cache_release(page);
Jens Axboe4f6f0bd2006-04-02 23:04:46 +0200513 unlock_page(page);
514 }
Dave Jones9aefe432006-04-10 09:02:40 +0200515out_nomem:
Jens Axboe5274f052006-03-30 15:15:30 +0200516 buf->ops->unmap(info, buf);
517 return ret;
518}
519
/* actor that moves one pipe buffer's worth of data to its destination */
typedef int (splice_actor)(struct pipe_inode_info *, struct pipe_buffer *,
			   struct splice_desc *);
522
Jens Axboe83f91352006-04-02 23:05:09 +0200523/*
524 * Pipe input worker. Most of this logic works like a regular pipe, the
525 * key here is the 'actor' worker passed in that actually moves the data
526 * to the wanted destination. See pipe_to_file/pipe_to_sendpage above.
527 */
static ssize_t move_from_pipe(struct inode *inode, struct file *out,
			      size_t len, unsigned int flags,
			      splice_actor *actor)
{
	struct pipe_inode_info *info;
	int ret, do_wakeup, err;
	struct splice_desc sd;

	ret = 0;
	do_wakeup = 0;

	/* per-transfer state handed to the actor for each buffer */
	sd.total_len = len;
	sd.flags = flags;
	sd.file = out;
	sd.pos = out->f_pos;

	mutex_lock(PIPE_MUTEX(*inode));

	info = inode->i_pipe;
	for (;;) {
		int bufs = info->nrbufs;

		if (bufs) {
			int curbuf = info->curbuf;
			struct pipe_buffer *buf = info->bufs + curbuf;
			struct pipe_buf_operations *ops = buf->ops;

			/* never hand the actor more than was requested */
			sd.len = buf->len;
			if (sd.len > sd.total_len)
				sd.len = sd.total_len;

			err = actor(info, buf, &sd);
			if (err) {
				/* -ENODATA is "retry", not a real error */
				if (!ret && err != -ENODATA)
					ret = err;

				break;
			}

			ret += sd.len;
			buf->offset += sd.len;
			buf->len -= sd.len;
			/* buffer fully consumed: release it, advance ring */
			if (!buf->len) {
				buf->ops = NULL;
				ops->release(info, buf);
				curbuf = (curbuf + 1) & (PIPE_BUFFERS - 1);
				info->curbuf = curbuf;
				info->nrbufs = --bufs;
				do_wakeup = 1;
			}

			sd.pos += sd.len;
			sd.total_len -= sd.len;
			if (!sd.total_len)
				break;
		}

		if (bufs)
			continue;
		/* pipe empty and no writers left: we're done */
		if (!PIPE_WRITERS(*inode))
			break;
		if (!PIPE_WAITING_WRITERS(*inode)) {
			/*
			 * With no writer queued to refill the pipe, only
			 * keep waiting if we have made no progress yet.
			 */
			if (ret)
				break;
		}

		if (flags & SPLICE_F_NONBLOCK) {
			if (!ret)
				ret = -EAGAIN;
			break;
		}

		if (signal_pending(current)) {
			if (!ret)
				ret = -ERESTARTSYS;
			break;
		}

		/* wake writers blocked on a full pipe before sleeping */
		if (do_wakeup) {
			smp_mb();
			if (waitqueue_active(PIPE_WAIT(*inode)))
				wake_up_interruptible_sync(PIPE_WAIT(*inode));
			kill_fasync(PIPE_FASYNC_WRITERS(*inode), SIGIO, POLL_OUT);
			do_wakeup = 0;
		}

		pipe_wait(inode);
	}

	mutex_unlock(PIPE_MUTEX(*inode));

	if (do_wakeup) {
		smp_mb();
		if (waitqueue_active(PIPE_WAIT(*inode)))
			wake_up_interruptible(PIPE_WAIT(*inode));
		kill_fasync(PIPE_FASYNC_WRITERS(*inode), SIGIO, POLL_OUT);
	}

	/* publish the new file position under the target inode's lock */
	mutex_lock(&out->f_mapping->host->i_mutex);
	out->f_pos = sd.pos;
	mutex_unlock(&out->f_mapping->host->i_mutex);
	return ret;

}
632
Jens Axboe83f91352006-04-02 23:05:09 +0200633/**
634 * generic_file_splice_write - splice data from a pipe to a file
635 * @inode: pipe inode
636 * @out: file to write to
637 * @len: number of bytes to splice
638 * @flags: splice modifier flags
639 *
640 * Will either move or copy pages (determined by @flags options) from
641 * the given pipe inode to the given file.
642 *
643 */
ssize_t generic_file_splice_write(struct inode *inode, struct file *out,
				  size_t len, unsigned int flags)
{
	struct address_space *mapping = out->f_mapping;
	ssize_t ret = move_from_pipe(inode, out, len, flags, pipe_to_file);

	/*
	 * if file or inode is SYNC and we actually wrote some data, sync it
	 */
	if (unlikely((out->f_flags & O_SYNC) || IS_SYNC(mapping->host))
	    && ret > 0) {
		struct inode *inode = mapping->host;
		int err;

		/* flush data and metadata, serialized with other writers */
		mutex_lock(&inode->i_mutex);
		err = generic_osync_inode(mapping->host, mapping,
					  OSYNC_METADATA|OSYNC_DATA);
		mutex_unlock(&inode->i_mutex);

		/* a sync failure overrides the successful byte count */
		if (err)
			ret = err;
	}

	return ret;
}
669
Jens Axboe059a8f32006-04-02 23:06:05 +0200670EXPORT_SYMBOL(generic_file_splice_write);
671
Jens Axboe83f91352006-04-02 23:05:09 +0200672/**
673 * generic_splice_sendpage - splice data from a pipe to a socket
674 * @inode: pipe inode
675 * @out: socket to write to
676 * @len: number of bytes to splice
677 * @flags: splice modifier flags
678 *
679 * Will send @len bytes from the pipe to a network socket. No data copying
680 * is involved.
681 *
682 */
ssize_t generic_splice_sendpage(struct inode *inode, struct file *out,
				size_t len, unsigned int flags)
{
	/*
	 * Same consumer loop as splice-to-file, but the actor hands each
	 * pipe buffer to the socket via ->sendpage() instead of copying.
	 */
	return move_from_pipe(inode, out, len, flags, pipe_to_sendpage);
}
688
Jens Axboe059a8f32006-04-02 23:06:05 +0200689EXPORT_SYMBOL(generic_splice_sendpage);
Jeff Garzika0f06782006-03-30 23:06:13 -0500690
Jens Axboe83f91352006-04-02 23:05:09 +0200691/*
692 * Attempt to initiate a splice from pipe to file.
693 */
Jens Axboe5274f052006-03-30 15:15:30 +0200694static long do_splice_from(struct inode *pipe, struct file *out, size_t len,
695 unsigned int flags)
696{
697 loff_t pos;
698 int ret;
699
700 if (!out->f_op || !out->f_op->splice_write)
701 return -EINVAL;
702
703 if (!(out->f_mode & FMODE_WRITE))
704 return -EBADF;
705
706 pos = out->f_pos;
707 ret = rw_verify_area(WRITE, out, &pos, len);
708 if (unlikely(ret < 0))
709 return ret;
710
711 return out->f_op->splice_write(pipe, out, len, flags);
712}
713
Jens Axboe83f91352006-04-02 23:05:09 +0200714/*
715 * Attempt to initiate a splice from a file to a pipe.
716 */
Jens Axboe5274f052006-03-30 15:15:30 +0200717static long do_splice_to(struct file *in, struct inode *pipe, size_t len,
718 unsigned int flags)
719{
720 loff_t pos, isize, left;
721 int ret;
722
723 if (!in->f_op || !in->f_op->splice_read)
724 return -EINVAL;
725
726 if (!(in->f_mode & FMODE_READ))
727 return -EBADF;
728
729 pos = in->f_pos;
730 ret = rw_verify_area(READ, in, &pos, len);
731 if (unlikely(ret < 0))
732 return ret;
733
734 isize = i_size_read(in->f_mapping->host);
735 if (unlikely(in->f_pos >= isize))
736 return 0;
737
738 left = isize - in->f_pos;
739 if (left < len)
740 len = left;
741
742 return in->f_op->splice_read(in, pipe, len, flags);
743}
744
Jens Axboe83f91352006-04-02 23:05:09 +0200745/*
746 * Determine where to splice to/from.
747 */
Jens Axboe5274f052006-03-30 15:15:30 +0200748static long do_splice(struct file *in, struct file *out, size_t len,
749 unsigned int flags)
750{
751 struct inode *pipe;
752
753 pipe = in->f_dentry->d_inode;
754 if (pipe->i_pipe)
755 return do_splice_from(pipe, out, len, flags);
756
757 pipe = out->f_dentry->d_inode;
758 if (pipe->i_pipe)
759 return do_splice_to(in, pipe, len, flags);
760
761 return -EINVAL;
762}
763
764asmlinkage long sys_splice(int fdin, int fdout, size_t len, unsigned int flags)
765{
766 long error;
767 struct file *in, *out;
768 int fput_in, fput_out;
769
770 if (unlikely(!len))
771 return 0;
772
773 error = -EBADF;
774 in = fget_light(fdin, &fput_in);
775 if (in) {
776 if (in->f_mode & FMODE_READ) {
777 out = fget_light(fdout, &fput_out);
778 if (out) {
779 if (out->f_mode & FMODE_WRITE)
780 error = do_splice(in, out, len, flags);
781 fput_light(out, fput_out);
782 }
783 }
784
785 fput_light(in, fput_in);
786 }
787
788 return error;
789}