/*
 * "splice": joining two ropes together by interweaving their strands.
 *
 * This is the "extended pipe" functionality, where a pipe is used as
 * an arbitrary in-memory buffer. Think of a pipe as a small kernel
 * buffer that you can use to transfer data from one end to the other.
 *
 * The traditional unix read/write is extended with a "splice()" operation
 * that transfers data buffers to or from a pipe buffer.
 *
 * Named by Larry McVoy, original implementation from Linus, extended by
 * Jens to support splicing to files and fixing the initial implementation
 * bugs.
 *
 * Copyright (C) 2005 Jens Axboe <axboe@suse.de>
 * Copyright (C) 2005 Linus Torvalds <torvalds@osdl.org>
 *
 */
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/pagemap.h>
#include <linux/pipe_fs_i.h>
#include <linux/mm_inline.h>
#include <linux/swap.h>
#include <linux/writeback.h>
#include <linux/buffer_head.h>
#include <linux/module.h>
#include <linux/syscalls.h>
Jens Axboe5274f052006-03-30 15:15:30 +020029
/*
 * Passed to the actors: describes one in-flight splice operation from a
 * pipe to a destination (file or socket).
 */
struct splice_desc {
	unsigned int len, total_len;	/* current chunk and remaining length */
	unsigned int flags;		/* splice modifier flags (SPLICE_F_*) */
	struct file *file;		/* file to read/write */
	loff_t pos;			/* file position to operate at */
};
39
/*
 * Attempt to steal a page from a pipe buffer. This should perhaps go into
 * a vm helper function, it's already simplified quite a bit by the
 * addition of remove_mapping(). If success is returned, the caller may
 * attempt to reuse this page for another destination.
 *
 * Returns 0 when the page has been pruned from the page cache and LRU
 * (caller may reuse it), 1 when it could not be detached. The page is
 * expected locked and uptodate on entry (asserted below).
 */
static int page_cache_pipe_buf_steal(struct pipe_inode_info *info,
				     struct pipe_buffer *buf)
{
	struct page *page = buf->page;
	struct address_space *mapping = page_mapping(page);

	WARN_ON(!PageLocked(page));
	WARN_ON(!PageUptodate(page));

	/*
	 * At least for ext2 with nobh option, we need to wait on writeback
	 * completing on this page, since we'll remove it from the pagecache.
	 * Otherwise truncate won't wait on the page, allowing the disk
	 * blocks to be reused by someone else before we actually wrote our
	 * data to them. fs corruption ensues.
	 */
	wait_on_page_writeback(page);

	/*
	 * Drop fs-private (buffer head) state first, otherwise
	 * remove_mapping() below would refuse the page.
	 */
	if (PagePrivate(page))
		try_to_release_page(page, mapping_gfp_mask(mapping));

	if (!remove_mapping(mapping, page))
		return 1;

	/*
	 * Also detach the page from the LRU, under the zone lock, so the
	 * VM no longer considers it for reclaim.
	 */
	if (PageLRU(page)) {
		struct zone *zone = page_zone(page);

		spin_lock_irq(&zone->lru_lock);
		BUG_ON(!PageLRU(page));
		__ClearPageLRU(page);
		del_page_from_lru(zone, page);
		spin_unlock_irq(&zone->lru_lock);
	}

	return 0;
}
82
Jens Axboe5274f052006-03-30 15:15:30 +020083static void page_cache_pipe_buf_release(struct pipe_inode_info *info,
84 struct pipe_buffer *buf)
85{
86 page_cache_release(buf->page);
87 buf->page = NULL;
88}
89
90static void *page_cache_pipe_buf_map(struct file *file,
91 struct pipe_inode_info *info,
92 struct pipe_buffer *buf)
93{
94 struct page *page = buf->page;
95
96 lock_page(page);
97
98 if (!PageUptodate(page)) {
99 unlock_page(page);
100 return ERR_PTR(-EIO);
101 }
102
103 if (!page->mapping) {
104 unlock_page(page);
105 return ERR_PTR(-ENODATA);
106 }
107
108 return kmap(buf->page);
109}
110
111static void page_cache_pipe_buf_unmap(struct pipe_inode_info *info,
112 struct pipe_buffer *buf)
113{
Jens Axboe4f6f0bd2006-04-02 23:04:46 +0200114 unlock_page(buf->page);
Jens Axboe5274f052006-03-30 15:15:30 +0200115 kunmap(buf->page);
116}
117
/*
 * Buffer operations for pipe buffers whose pages come straight from the
 * page cache. can_merge is 0: unlike anonymous pipe pages, page cache
 * pages must not be appended to in place by later writes.
 */
static struct pipe_buf_operations page_cache_pipe_buf_ops = {
	.can_merge = 0,
	.map = page_cache_pipe_buf_map,
	.unmap = page_cache_pipe_buf_unmap,
	.release = page_cache_pipe_buf_release,
	.steal = page_cache_pipe_buf_steal,
};
125
Jens Axboe83f91352006-04-02 23:05:09 +0200126/*
127 * Pipe output worker. This sets up our pipe format with the page cache
128 * pipe buffer operations. Otherwise very similar to the regular pipe_writev().
129 */
Jens Axboe5274f052006-03-30 15:15:30 +0200130static ssize_t move_to_pipe(struct inode *inode, struct page **pages,
131 int nr_pages, unsigned long offset,
Linus Torvalds29e35092006-04-02 12:46:35 -0700132 unsigned long len, unsigned int flags)
Jens Axboe5274f052006-03-30 15:15:30 +0200133{
134 struct pipe_inode_info *info;
135 int ret, do_wakeup, i;
136
137 ret = 0;
138 do_wakeup = 0;
139 i = 0;
140
141 mutex_lock(PIPE_MUTEX(*inode));
142
143 info = inode->i_pipe;
144 for (;;) {
145 int bufs;
146
147 if (!PIPE_READERS(*inode)) {
148 send_sig(SIGPIPE, current, 0);
149 if (!ret)
150 ret = -EPIPE;
151 break;
152 }
153
154 bufs = info->nrbufs;
155 if (bufs < PIPE_BUFFERS) {
156 int newbuf = (info->curbuf + bufs) & (PIPE_BUFFERS - 1);
157 struct pipe_buffer *buf = info->bufs + newbuf;
158 struct page *page = pages[i++];
159 unsigned long this_len;
160
161 this_len = PAGE_CACHE_SIZE - offset;
162 if (this_len > len)
163 this_len = len;
164
165 buf->page = page;
166 buf->offset = offset;
167 buf->len = this_len;
168 buf->ops = &page_cache_pipe_buf_ops;
169 info->nrbufs = ++bufs;
170 do_wakeup = 1;
171
172 ret += this_len;
173 len -= this_len;
174 offset = 0;
175 if (!--nr_pages)
176 break;
177 if (!len)
178 break;
179 if (bufs < PIPE_BUFFERS)
180 continue;
181
182 break;
183 }
184
Linus Torvalds29e35092006-04-02 12:46:35 -0700185 if (flags & SPLICE_F_NONBLOCK) {
186 if (!ret)
187 ret = -EAGAIN;
188 break;
189 }
190
Jens Axboe5274f052006-03-30 15:15:30 +0200191 if (signal_pending(current)) {
192 if (!ret)
193 ret = -ERESTARTSYS;
194 break;
195 }
196
197 if (do_wakeup) {
198 wake_up_interruptible_sync(PIPE_WAIT(*inode));
199 kill_fasync(PIPE_FASYNC_READERS(*inode), SIGIO,
200 POLL_IN);
201 do_wakeup = 0;
202 }
203
204 PIPE_WAITING_WRITERS(*inode)++;
205 pipe_wait(inode);
206 PIPE_WAITING_WRITERS(*inode)--;
207 }
208
209 mutex_unlock(PIPE_MUTEX(*inode));
210
211 if (do_wakeup) {
212 wake_up_interruptible(PIPE_WAIT(*inode));
213 kill_fasync(PIPE_FASYNC_READERS(*inode), SIGIO, POLL_IN);
214 }
215
216 while (i < nr_pages)
217 page_cache_release(pages[i++]);
218
219 return ret;
220}
221
/*
 * Read up to 'len' bytes worth of pages from 'in' starting at its current
 * f_pos, and splice them into the pipe inode. Returns the number of bytes
 * handed to move_to_pipe(), 0 if nothing could be set up, or a negative
 * error from move_to_pipe(). Capped at PIPE_BUFFERS pages per call; the
 * caller loops for longer requests.
 */
static int __generic_file_splice_read(struct file *in, struct inode *pipe,
				      size_t len, unsigned int flags)
{
	struct address_space *mapping = in->f_mapping;
	unsigned int offset, nr_pages;
	struct page *pages[PIPE_BUFFERS], *shadow[PIPE_BUFFERS];
	struct page *page;
	pgoff_t index, pidx;
	int i, j;

	index = in->f_pos >> PAGE_CACHE_SHIFT;
	offset = in->f_pos & ~PAGE_CACHE_MASK;
	nr_pages = (len + offset + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;

	if (nr_pages > PIPE_BUFFERS)
		nr_pages = PIPE_BUFFERS;

	/*
	 * initiate read-ahead on this page range
	 */
	do_page_cache_readahead(mapping, in, index, nr_pages);

	/*
	 * Get as many pages from the page cache as possible..
	 * Start IO on the page cache entries we create (we
	 * can assume that any pre-existing ones we find have
	 * already had IO started on them).
	 */
	i = find_get_pages(mapping, index, nr_pages, pages);

	/*
	 * common case - we found all pages and they are contiguous,
	 * kick them off
	 */
	if (i && (pages[i - 1]->index == index + i - 1))
		goto splice_them;

	/*
	 * fill shadow[] with pages at the right locations, so we only
	 * have to fill holes
	 */
	memset(shadow, 0, nr_pages * sizeof(struct page *));
	for (j = 0; j < i; j++)
		shadow[pages[j]->index - index] = pages[j];

	/*
	 * now fill in the holes
	 */
	for (i = 0, pidx = index; i < nr_pages; pidx++, i++) {
		int error;

		if (shadow[i])
			continue;

		/*
		 * no page there, look one up / create it
		 */
		page = find_or_create_page(mapping, pidx,
						   mapping_gfp_mask(mapping));
		if (!page)
			break;

		if (PageUptodate(page))
			unlock_page(page);
		else {
			/*
			 * Kick off IO; uptodateness is re-checked later by
			 * the pipe buffer's ->map when the data is consumed.
			 */
			error = mapping->a_ops->readpage(in, page);

			if (unlikely(error)) {
				page_cache_release(page);
				break;
			}
		}
		shadow[i] = page;
	}

	/*
	 * NOTE(review): on a partial break above (i < nr_pages), only the
	 * first i shadow entries are copied to pages[] and spliced; any
	 * already-found pages sitting in shadow[] beyond slot i appear to
	 * keep their reference — possible leak in this rare path, confirm
	 * against later upstream versions before relying on it.
	 */
	if (!i) {
		for (i = 0; i < nr_pages; i++) {
			if (shadow[i])
				page_cache_release(shadow[i]);
		}
		return 0;
	}

	memcpy(pages, shadow, i * sizeof(struct page *));

	/*
	 * Now we splice them into the pipe..
	 */
splice_them:
	return move_to_pipe(pipe, pages, i, offset, len, flags);
}
313
Jens Axboe83f91352006-04-02 23:05:09 +0200314/**
315 * generic_file_splice_read - splice data from file to a pipe
316 * @in: file to splice from
317 * @pipe: pipe to splice to
318 * @len: number of bytes to splice
319 * @flags: splice modifier flags
320 *
321 * Will read pages from given file and fill them into a pipe.
322 *
323 */
Jens Axboe5274f052006-03-30 15:15:30 +0200324ssize_t generic_file_splice_read(struct file *in, struct inode *pipe,
325 size_t len, unsigned int flags)
326{
327 ssize_t spliced;
328 int ret;
329
330 ret = 0;
331 spliced = 0;
332 while (len) {
Linus Torvalds29e35092006-04-02 12:46:35 -0700333 ret = __generic_file_splice_read(in, pipe, len, flags);
Jens Axboe5274f052006-03-30 15:15:30 +0200334
335 if (ret <= 0)
336 break;
337
338 in->f_pos += ret;
339 len -= ret;
340 spliced += ret;
Linus Torvalds29e35092006-04-02 12:46:35 -0700341
342 if (!(flags & SPLICE_F_NONBLOCK))
343 continue;
344 ret = -EAGAIN;
345 break;
Jens Axboe5274f052006-03-30 15:15:30 +0200346 }
347
348 if (spliced)
349 return spliced;
350
351 return ret;
352}
353
Jens Axboe059a8f32006-04-02 23:06:05 +0200354EXPORT_SYMBOL(generic_file_splice_read);
355
/*
 * Send 'sd->len' bytes to socket from 'sd->file' at position 'sd->pos'
 * using sendpage(). Returns 0 when the full chunk was sent, -EIO on a
 * short or failed send, or the error from mapping the pipe buffer.
 */
static int pipe_to_sendpage(struct pipe_inode_info *info,
			    struct pipe_buffer *buf, struct splice_desc *sd)
{
	struct file *file = sd->file;
	loff_t pos = sd->pos;
	unsigned int offset;
	ssize_t ret;
	void *ptr;
	int more;

	/*
	 * sub-optimal, but we are limited by the pipe ->map. we don't
	 * need a kmap'ed buffer here, we just want to make sure we
	 * have the page pinned if the pipe page originates from the
	 * page cache
	 */
	ptr = buf->ops->map(file, info, buf);
	if (IS_ERR(ptr))
		return PTR_ERR(ptr);

	offset = pos & ~PAGE_CACHE_MASK;
	/*
	 * Hint MSG_MORE to the socket when the caller asked for it or when
	 * this chunk does not finish the splice request.
	 */
	more = (sd->flags & SPLICE_F_MORE) || sd->len < sd->total_len;

	ret = file->f_op->sendpage(file, buf->page, offset, sd->len, &pos, more);

	buf->ops->unmap(info, buf);
	if (ret == sd->len)
		return 0;

	return -EIO;
}
391
392/*
393 * This is a little more tricky than the file -> pipe splicing. There are
394 * basically three cases:
395 *
396 * - Destination page already exists in the address space and there
397 * are users of it. For that case we have no other option that
398 * copying the data. Tough luck.
399 * - Destination page already exists in the address space, but there
400 * are no users of it. Make sure it's uptodate, then drop it. Fall
401 * through to last case.
402 * - Destination page does not exist, we can add the pipe page to
403 * the page cache and avoid the copy.
404 *
Jens Axboe83f91352006-04-02 23:05:09 +0200405 * If asked to move pages to the output file (SPLICE_F_MOVE is set in
406 * sd->flags), we attempt to migrate pages from the pipe to the output
407 * file address space page cache. This is possible if no one else has
408 * the pipe page referenced outside of the pipe and page cache. If
409 * SPLICE_F_MOVE isn't set, or we cannot move the page, we simply create
410 * a new page in the output file page cache and fill/dirty that.
Jens Axboe5274f052006-03-30 15:15:30 +0200411 */
412static int pipe_to_file(struct pipe_inode_info *info, struct pipe_buffer *buf,
413 struct splice_desc *sd)
414{
415 struct file *file = sd->file;
416 struct address_space *mapping = file->f_mapping;
417 unsigned int offset;
418 struct page *page;
Jens Axboe5274f052006-03-30 15:15:30 +0200419 pgoff_t index;
Jens Axboe5abc97a2006-03-30 15:16:46 +0200420 char *src;
Jens Axboe4f6f0bd2006-04-02 23:04:46 +0200421 int ret, stolen;
Jens Axboe5274f052006-03-30 15:15:30 +0200422
423 /*
424 * after this, page will be locked and unmapped
425 */
426 src = buf->ops->map(file, info, buf);
427 if (IS_ERR(src))
428 return PTR_ERR(src);
429
430 index = sd->pos >> PAGE_CACHE_SHIFT;
431 offset = sd->pos & ~PAGE_CACHE_MASK;
Jens Axboe4f6f0bd2006-04-02 23:04:46 +0200432 stolen = 0;
Jens Axboe5274f052006-03-30 15:15:30 +0200433
Jens Axboe5274f052006-03-30 15:15:30 +0200434 /*
Jens Axboe5abc97a2006-03-30 15:16:46 +0200435 * reuse buf page, if SPLICE_F_MOVE is set
Jens Axboe5274f052006-03-30 15:15:30 +0200436 */
Jens Axboe5abc97a2006-03-30 15:16:46 +0200437 if (sd->flags & SPLICE_F_MOVE) {
Jens Axboe83f91352006-04-02 23:05:09 +0200438 /*
439 * If steal succeeds, buf->page is now pruned from the vm
440 * side (LRU and page cache) and we can reuse it.
441 */
Jens Axboe5abc97a2006-03-30 15:16:46 +0200442 if (buf->ops->steal(info, buf))
443 goto find_page;
Jens Axboe5274f052006-03-30 15:15:30 +0200444
Jens Axboe5abc97a2006-03-30 15:16:46 +0200445 page = buf->page;
Jens Axboe4f6f0bd2006-04-02 23:04:46 +0200446 stolen = 1;
Jens Axboe5abc97a2006-03-30 15:16:46 +0200447 if (add_to_page_cache_lru(page, mapping, index,
448 mapping_gfp_mask(mapping)))
449 goto find_page;
450 } else {
451find_page:
452 ret = -ENOMEM;
453 page = find_or_create_page(mapping, index,
454 mapping_gfp_mask(mapping));
455 if (!page)
456 goto out;
Jens Axboe5274f052006-03-30 15:15:30 +0200457
Jens Axboe5abc97a2006-03-30 15:16:46 +0200458 /*
459 * If the page is uptodate, it is also locked. If it isn't
460 * uptodate, we can mark it uptodate if we are filling the
461 * full page. Otherwise we need to read it in first...
462 */
463 if (!PageUptodate(page)) {
464 if (sd->len < PAGE_CACHE_SIZE) {
465 ret = mapping->a_ops->readpage(file, page);
466 if (unlikely(ret))
467 goto out;
468
469 lock_page(page);
470
471 if (!PageUptodate(page)) {
472 /*
473 * page got invalidated, repeat
474 */
475 if (!page->mapping) {
476 unlock_page(page);
477 page_cache_release(page);
478 goto find_page;
479 }
480 ret = -EIO;
481 goto out;
Jens Axboe5274f052006-03-30 15:15:30 +0200482 }
Jens Axboe5abc97a2006-03-30 15:16:46 +0200483 } else {
484 WARN_ON(!PageLocked(page));
485 SetPageUptodate(page);
Jens Axboe5274f052006-03-30 15:15:30 +0200486 }
Jens Axboe5274f052006-03-30 15:15:30 +0200487 }
488 }
489
490 ret = mapping->a_ops->prepare_write(file, page, 0, sd->len);
Jens Axboe4f6f0bd2006-04-02 23:04:46 +0200491 if (ret == AOP_TRUNCATED_PAGE) {
492 page_cache_release(page);
493 goto find_page;
494 } else if (ret)
Jens Axboe5274f052006-03-30 15:15:30 +0200495 goto out;
496
Jens Axboe4f6f0bd2006-04-02 23:04:46 +0200497 if (!stolen) {
Jens Axboe5abc97a2006-03-30 15:16:46 +0200498 char *dst = kmap_atomic(page, KM_USER0);
499
500 memcpy(dst + offset, src + buf->offset, sd->len);
501 flush_dcache_page(page);
502 kunmap_atomic(dst, KM_USER0);
503 }
Jens Axboe5274f052006-03-30 15:15:30 +0200504
505 ret = mapping->a_ops->commit_write(file, page, 0, sd->len);
Jens Axboe4f6f0bd2006-04-02 23:04:46 +0200506 if (ret == AOP_TRUNCATED_PAGE) {
507 page_cache_release(page);
508 goto find_page;
509 } else if (ret)
Jens Axboe5274f052006-03-30 15:15:30 +0200510 goto out;
511
Jens Axboe4f6f0bd2006-04-02 23:04:46 +0200512 balance_dirty_pages_ratelimited(mapping);
Jens Axboe5274f052006-03-30 15:15:30 +0200513out:
Jens Axboe4f6f0bd2006-04-02 23:04:46 +0200514 if (!stolen) {
Jens Axboe5abc97a2006-03-30 15:16:46 +0200515 page_cache_release(page);
Jens Axboe4f6f0bd2006-04-02 23:04:46 +0200516 unlock_page(page);
517 }
Jens Axboe5274f052006-03-30 15:15:30 +0200518 buf->ops->unmap(info, buf);
519 return ret;
520}
521
/*
 * An actor consumes one pipe_buffer worth of data on behalf of
 * move_from_pipe(), returning 0 on success or a negative errno
 * (see pipe_to_file / pipe_to_sendpage).
 */
typedef int (splice_actor)(struct pipe_inode_info *, struct pipe_buffer *,
			   struct splice_desc *);
524
/*
 * Pipe input worker. Most of this logic works like a regular pipe, the
 * key here is the 'actor' worker passed in that actually moves the data
 * to the wanted destination. See pipe_to_file/pipe_to_sendpage above.
 *
 * Returns the number of bytes moved, or an error if the first actor call
 * already failed. out->f_pos is advanced by the amount moved.
 */
static ssize_t move_from_pipe(struct inode *inode, struct file *out,
			      size_t len, unsigned int flags,
			      splice_actor *actor)
{
	struct pipe_inode_info *info;
	int ret, do_wakeup, err;
	struct splice_desc sd;

	ret = 0;
	do_wakeup = 0;

	sd.total_len = len;
	sd.flags = flags;
	sd.file = out;
	sd.pos = out->f_pos;

	mutex_lock(PIPE_MUTEX(*inode));

	info = inode->i_pipe;
	for (;;) {
		int bufs = info->nrbufs;

		if (bufs) {
			int curbuf = info->curbuf;
			struct pipe_buffer *buf = info->bufs + curbuf;
			struct pipe_buf_operations *ops = buf->ops;

			/* clamp this chunk to what the caller still wants */
			sd.len = buf->len;
			if (sd.len > sd.total_len)
				sd.len = sd.total_len;

			err = actor(info, buf, &sd);
			if (err) {
				/*
				 * -ENODATA (buffer page truncated away) is
				 * not reported to the caller, only stops
				 * the loop.
				 */
				if (!ret && err != -ENODATA)
					ret = err;

				break;
			}

			ret += sd.len;
			buf->offset += sd.len;
			buf->len -= sd.len;
			/* buffer fully consumed: release it and advance */
			if (!buf->len) {
				buf->ops = NULL;
				ops->release(info, buf);
				curbuf = (curbuf + 1) & (PIPE_BUFFERS - 1);
				info->curbuf = curbuf;
				info->nrbufs = --bufs;
				do_wakeup = 1;
			}

			sd.pos += sd.len;
			sd.total_len -= sd.len;
			if (!sd.total_len)
				break;
		}

		if (bufs)
			continue;
		/* pipe empty: stop if no writer can refill it */
		if (!PIPE_WRITERS(*inode))
			break;
		if (!PIPE_WAITING_WRITERS(*inode)) {
			if (ret)
				break;
		}

		if (flags & SPLICE_F_NONBLOCK) {
			if (!ret)
				ret = -EAGAIN;
			break;
		}

		if (signal_pending(current)) {
			if (!ret)
				ret = -ERESTARTSYS;
			break;
		}

		/* wake writers before sleeping so they can refill the pipe */
		if (do_wakeup) {
			wake_up_interruptible_sync(PIPE_WAIT(*inode));
			kill_fasync(PIPE_FASYNC_WRITERS(*inode), SIGIO,
				    POLL_OUT);
			do_wakeup = 0;
		}

		pipe_wait(inode);
	}

	mutex_unlock(PIPE_MUTEX(*inode));

	if (do_wakeup) {
		wake_up_interruptible(PIPE_WAIT(*inode));
		kill_fasync(PIPE_FASYNC_WRITERS(*inode), SIGIO, POLL_OUT);
	}

	/* publish the new file position under the destination's i_mutex */
	mutex_lock(&out->f_mapping->host->i_mutex);
	out->f_pos = sd.pos;
	mutex_unlock(&out->f_mapping->host->i_mutex);
	return ret;

}
630
Jens Axboe83f91352006-04-02 23:05:09 +0200631/**
632 * generic_file_splice_write - splice data from a pipe to a file
633 * @inode: pipe inode
634 * @out: file to write to
635 * @len: number of bytes to splice
636 * @flags: splice modifier flags
637 *
638 * Will either move or copy pages (determined by @flags options) from
639 * the given pipe inode to the given file.
640 *
641 */
Jens Axboe5274f052006-03-30 15:15:30 +0200642ssize_t generic_file_splice_write(struct inode *inode, struct file *out,
643 size_t len, unsigned int flags)
644{
Jens Axboe4f6f0bd2006-04-02 23:04:46 +0200645 struct address_space *mapping = out->f_mapping;
646 ssize_t ret = move_from_pipe(inode, out, len, flags, pipe_to_file);
647
648 /*
649 * if file or inode is SYNC and we actually wrote some data, sync it
650 */
651 if (unlikely((out->f_flags & O_SYNC) || IS_SYNC(mapping->host))
652 && ret > 0) {
653 struct inode *inode = mapping->host;
654 int err;
655
656 mutex_lock(&inode->i_mutex);
657 err = generic_osync_inode(mapping->host, mapping,
658 OSYNC_METADATA|OSYNC_DATA);
659 mutex_unlock(&inode->i_mutex);
660
661 if (err)
662 ret = err;
663 }
664
665 return ret;
Jens Axboe5274f052006-03-30 15:15:30 +0200666}
667
Jens Axboe059a8f32006-04-02 23:06:05 +0200668EXPORT_SYMBOL(generic_file_splice_write);
669
/**
 * generic_splice_sendpage - splice data from a pipe to a socket
 * @inode: pipe inode
 * @out: socket to write to
 * @len: number of bytes to splice
 * @flags: splice modifier flags
 *
 * Will send @len bytes from the pipe to a network socket. No data copying
 * is involved: each pipe buffer is handed to the socket's ->sendpage via
 * the pipe_to_sendpage actor.
 */
ssize_t generic_splice_sendpage(struct inode *inode, struct file *out,
				size_t len, unsigned int flags)
{
	return move_from_pipe(inode, out, len, flags, pipe_to_sendpage);
}

EXPORT_SYMBOL(generic_splice_sendpage);
Jeff Garzika0f06782006-03-30 23:06:13 -0500688
Jens Axboe83f91352006-04-02 23:05:09 +0200689/*
690 * Attempt to initiate a splice from pipe to file.
691 */
Jens Axboe5274f052006-03-30 15:15:30 +0200692static long do_splice_from(struct inode *pipe, struct file *out, size_t len,
693 unsigned int flags)
694{
695 loff_t pos;
696 int ret;
697
698 if (!out->f_op || !out->f_op->splice_write)
699 return -EINVAL;
700
701 if (!(out->f_mode & FMODE_WRITE))
702 return -EBADF;
703
704 pos = out->f_pos;
705 ret = rw_verify_area(WRITE, out, &pos, len);
706 if (unlikely(ret < 0))
707 return ret;
708
709 return out->f_op->splice_write(pipe, out, len, flags);
710}
711
Jens Axboe83f91352006-04-02 23:05:09 +0200712/*
713 * Attempt to initiate a splice from a file to a pipe.
714 */
Jens Axboe5274f052006-03-30 15:15:30 +0200715static long do_splice_to(struct file *in, struct inode *pipe, size_t len,
716 unsigned int flags)
717{
718 loff_t pos, isize, left;
719 int ret;
720
721 if (!in->f_op || !in->f_op->splice_read)
722 return -EINVAL;
723
724 if (!(in->f_mode & FMODE_READ))
725 return -EBADF;
726
727 pos = in->f_pos;
728 ret = rw_verify_area(READ, in, &pos, len);
729 if (unlikely(ret < 0))
730 return ret;
731
732 isize = i_size_read(in->f_mapping->host);
733 if (unlikely(in->f_pos >= isize))
734 return 0;
735
736 left = isize - in->f_pos;
737 if (left < len)
738 len = left;
739
740 return in->f_op->splice_read(in, pipe, len, flags);
741}
742
Jens Axboe83f91352006-04-02 23:05:09 +0200743/*
744 * Determine where to splice to/from.
745 */
Jens Axboe5274f052006-03-30 15:15:30 +0200746static long do_splice(struct file *in, struct file *out, size_t len,
747 unsigned int flags)
748{
749 struct inode *pipe;
750
751 pipe = in->f_dentry->d_inode;
752 if (pipe->i_pipe)
753 return do_splice_from(pipe, out, len, flags);
754
755 pipe = out->f_dentry->d_inode;
756 if (pipe->i_pipe)
757 return do_splice_to(in, pipe, len, flags);
758
759 return -EINVAL;
760}
761
762asmlinkage long sys_splice(int fdin, int fdout, size_t len, unsigned int flags)
763{
764 long error;
765 struct file *in, *out;
766 int fput_in, fput_out;
767
768 if (unlikely(!len))
769 return 0;
770
771 error = -EBADF;
772 in = fget_light(fdin, &fput_in);
773 if (in) {
774 if (in->f_mode & FMODE_READ) {
775 out = fget_light(fdout, &fput_out);
776 if (out) {
777 if (out->f_mode & FMODE_WRITE)
778 error = do_splice(in, out, len, flags);
779 fput_light(out, fput_out);
780 }
781 }
782
783 fput_light(in, fput_in);
784 }
785
786 return error;
787}