blob: 36bc262dfbd58c9e64204483197bbc52046cb3b5 [file] [log] [blame]
/*
 * "splice": joining two ropes together by interweaving their strands.
 *
 * This is the "extended pipe" functionality, where a pipe is used as
 * an arbitrary in-memory buffer. Think of a pipe as a small kernel
 * buffer that you can use to transfer data from one end to the other.
 *
 * The traditional unix read/write is extended with a "splice()" operation
 * that transfers data buffers to or from a pipe buffer.
 *
 * Named by Larry McVoy, original implementation from Linus, extended by
 * Jens to support splicing to files and fixing the initial implementation
 * bugs.
 *
 * Copyright (C) 2005 Jens Axboe <axboe@suse.de>
 * Copyright (C) 2005 Linus Torvalds <torvalds@osdl.org>
 *
 */
19#include <linux/fs.h>
20#include <linux/file.h>
21#include <linux/pagemap.h>
22#include <linux/pipe_fs_i.h>
23#include <linux/mm_inline.h>
Jens Axboe5abc97a2006-03-30 15:16:46 +020024#include <linux/swap.h>
Jens Axboe4f6f0bd2006-04-02 23:04:46 +020025#include <linux/writeback.h>
26#include <linux/buffer_head.h>
Jeff Garzika0f06782006-03-30 23:06:13 -050027#include <linux/module.h>
Jens Axboe4f6f0bd2006-04-02 23:04:46 +020028#include <linux/syscalls.h>
Jens Axboe5274f052006-03-30 15:15:30 +020029
30/*
31 * Passed to the actors
32 */
/*
 * Passed to the splice actors (pipe_to_file, pipe_to_sendpage); tracks
 * the progress of one splice operation against the output file.
 */
struct splice_desc {
	unsigned int len, total_len;	/* current and remaining length */
	unsigned int flags;		/* splice flags */
	struct file *file;		/* file to read/write */
	loff_t pos;			/* file position */
};
39
Jens Axboe83f91352006-04-02 23:05:09 +020040/*
41 * Attempt to steal a page from a pipe buffer. This should perhaps go into
42 * a vm helper function, it's already simplified quite a bit by the
43 * addition of remove_mapping(). If success is returned, the caller may
44 * attempt to reuse this page for another destination.
45 */
/*
 * Attempt to take ownership of the page backing @buf, removing it from
 * the page cache (and, for the caller's benefit, from the LRU accounting
 * implied by PIPE_BUF_FLAG_LRU).
 *
 * Returns 0 on success (page is now exclusively ours, still locked) and
 * 1 if the page could not be pruned from the VM side.
 */
static int page_cache_pipe_buf_steal(struct pipe_inode_info *info,
				     struct pipe_buffer *buf)
{
	struct page *page = buf->page;
	struct address_space *mapping = page_mapping(page);

	/* caller must hand us a locked, uptodate page */
	WARN_ON(!PageLocked(page));
	WARN_ON(!PageUptodate(page));

	/*
	 * At least for ext2 with nobh option, we need to wait on writeback
	 * completing on this page, since we'll remove it from the pagecache.
	 * Otherwise truncate wont wait on the page, allowing the disk
	 * blocks to be reused by someone else before we actually wrote our
	 * data to them. fs corruption ensues.
	 */
	wait_on_page_writeback(page);

	/* drop any fs-private (buffer head) state before unhashing */
	if (PagePrivate(page))
		try_to_release_page(page, mapping_gfp_mask(mapping));

	if (!remove_mapping(mapping, page))
		return 1;

	buf->flags |= PIPE_BUF_FLAG_STOLEN | PIPE_BUF_FLAG_LRU;
	return 0;
}
73
Jens Axboe5274f052006-03-30 15:15:30 +020074static void page_cache_pipe_buf_release(struct pipe_inode_info *info,
75 struct pipe_buffer *buf)
76{
77 page_cache_release(buf->page);
78 buf->page = NULL;
Jens Axboe3e7ee3e2006-04-02 23:11:04 +020079 buf->flags &= ~(PIPE_BUF_FLAG_STOLEN | PIPE_BUF_FLAG_LRU);
Jens Axboe5274f052006-03-30 15:15:30 +020080}
81
/*
 * Map the page backing @buf for kernel access. Because the page comes
 * from the page cache, it may not be uptodate yet (readpage I/O still in
 * flight) or may have been truncated since it was spliced in; both cases
 * are detected under the page lock.
 *
 * Returns a kmap'ed address on success, ERR_PTR(-ENODATA) if the page
 * was truncated, or ERR_PTR(-EIO) on read error.
 */
static void *page_cache_pipe_buf_map(struct file *file,
				     struct pipe_inode_info *info,
				     struct pipe_buffer *buf)
{
	struct page *page = buf->page;
	int err;

	if (!PageUptodate(page)) {
		lock_page(page);

		/*
		 * Page got truncated/unhashed. This will cause a 0-byte
		 * splice, if this is the first page
		 */
		if (!page->mapping) {
			err = -ENODATA;
			goto error;
		}

		/*
		 * uh oh, read-error from disk
		 */
		if (!PageUptodate(page)) {
			err = -EIO;
			goto error;
		}

		/*
		 * page is ok afterall, fall through to mapping
		 */
		unlock_page(page);
	}

	return kmap(page);
error:
	unlock_page(page);
	return ERR_PTR(err);
}
120
121static void page_cache_pipe_buf_unmap(struct pipe_inode_info *info,
122 struct pipe_buffer *buf)
123{
Jens Axboe5274f052006-03-30 15:15:30 +0200124 kunmap(buf->page);
125}
126
/*
 * Buffer operations for pipe buffers that reference page-cache pages.
 * can_merge is 0: each buffer maps a distinct cache page, so appended
 * data cannot be merged into an existing buffer.
 */
static struct pipe_buf_operations page_cache_pipe_buf_ops = {
	.can_merge = 0,
	.map = page_cache_pipe_buf_map,
	.unmap = page_cache_pipe_buf_unmap,
	.release = page_cache_pipe_buf_release,
	.steal = page_cache_pipe_buf_steal,
};
134
Jens Axboe83f91352006-04-02 23:05:09 +0200135/*
136 * Pipe output worker. This sets up our pipe format with the page cache
137 * pipe buffer operations. Otherwise very similar to the regular pipe_writev().
138 */
static ssize_t move_to_pipe(struct pipe_inode_info *pipe, struct page **pages,
			    int nr_pages, unsigned long offset,
			    unsigned long len, unsigned int flags)
{
	int ret, do_wakeup, i;

	ret = 0;		/* bytes queued, or first error */
	do_wakeup = 0;		/* readers need waking once we added data */
	i = 0;			/* next entry of pages[] to consume */

	/* anonymous (internal) pipes have no inode and need no locking */
	if (pipe->inode)
		mutex_lock(&pipe->inode->i_mutex);

	for (;;) {
		/* no readers left: writing would block forever */
		if (!pipe->readers) {
			send_sig(SIGPIPE, current, 0);
			if (!ret)
				ret = -EPIPE;
			break;
		}

		if (pipe->nrbufs < PIPE_BUFFERS) {
			/* slots form a ring; pick the first free one */
			int newbuf = (pipe->curbuf + pipe->nrbufs) & (PIPE_BUFFERS - 1);
			struct pipe_buffer *buf = pipe->bufs + newbuf;
			struct page *page = pages[i++];
			unsigned long this_len;

			/* offset only applies to the first page */
			this_len = PAGE_CACHE_SIZE - offset;
			if (this_len > len)
				this_len = len;

			buf->page = page;
			buf->offset = offset;
			buf->len = this_len;
			buf->ops = &page_cache_pipe_buf_ops;
			pipe->nrbufs++;
			if (pipe->inode)
				do_wakeup = 1;

			ret += this_len;
			len -= this_len;
			offset = 0;
			if (!--nr_pages)
				break;
			if (!len)
				break;
			if (pipe->nrbufs < PIPE_BUFFERS)
				continue;

			break;
		}

		/* pipe full and caller doesn't want to wait */
		if (flags & SPLICE_F_NONBLOCK) {
			if (!ret)
				ret = -EAGAIN;
			break;
		}

		if (signal_pending(current)) {
			if (!ret)
				ret = -ERESTARTSYS;
			break;
		}

		/* wake pending readers before we sleep on a full pipe */
		if (do_wakeup) {
			smp_mb();
			if (waitqueue_active(&pipe->wait))
				wake_up_interruptible_sync(&pipe->wait);
			kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
			do_wakeup = 0;
		}

		pipe->waiting_writers++;
		pipe_wait(pipe);
		pipe->waiting_writers--;
	}

	if (pipe->inode)
		mutex_unlock(&pipe->inode->i_mutex);

	/* final reader wakeup, outside the pipe lock */
	if (do_wakeup) {
		smp_mb();
		if (waitqueue_active(&pipe->wait))
			wake_up_interruptible(&pipe->wait);
		kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
	}

	/* drop references on pages we never managed to queue */
	while (i < nr_pages)
		page_cache_release(pages[i++]);

	return ret;
}
231
/*
 * Look up (and read in, if necessary) up to PIPE_BUFFERS pages of 'in'
 * covering [f_pos, f_pos + len) and hand them to move_to_pipe().
 * Returns the number of bytes queued on the pipe, 0 on EOF/no progress,
 * or a negative errno.
 */
static int
__generic_file_splice_read(struct file *in, struct pipe_inode_info *pipe,
			   size_t len, unsigned int flags)
{
	struct address_space *mapping = in->f_mapping;
	unsigned int offset, nr_pages;
	struct page *pages[PIPE_BUFFERS];
	struct page *page;
	pgoff_t index;
	int i, error;

	index = in->f_pos >> PAGE_CACHE_SHIFT;
	offset = in->f_pos & ~PAGE_CACHE_MASK;
	nr_pages = (len + offset + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;

	/* move_to_pipe() can take at most PIPE_BUFFERS pages per call */
	if (nr_pages > PIPE_BUFFERS)
		nr_pages = PIPE_BUFFERS;

	/*
	 * initiate read-ahead on this page range. however, don't call into
	 * read-ahead if this is a non-zero offset (we are likely doing small
	 * chunk splice and the page is already there) for a single page.
	 */
	if (!offset || nr_pages > 1)
		do_page_cache_readahead(mapping, in, index, nr_pages);

	/*
	 * now fill in the holes
	 */
	error = 0;
	for (i = 0; i < nr_pages; i++, index++) {
find_page:
		/*
		 * lookup the page for this index
		 */
		page = find_get_page(mapping, index);
		if (!page) {
			/*
			 * If in nonblock mode then dont block on
			 * readpage (we've kicked readahead so there
			 * will be asynchronous progress):
			 */
			if (flags & SPLICE_F_NONBLOCK)
				break;

			/*
			 * page didn't exist, allocate one
			 */
			page = page_cache_alloc_cold(mapping);
			if (!page)
				break;

			error = add_to_page_cache_lru(page, mapping, index,
						mapping_gfp_mask(mapping));
			if (unlikely(error)) {
				page_cache_release(page);
				break;
			}

			goto readpage;
		}

		/*
		 * If the page isn't uptodate, we may need to start io on it
		 */
		if (!PageUptodate(page)) {
			lock_page(page);

			/*
			 * page was truncated, stop here. if this isn't the
			 * first page, we'll just complete what we already
			 * added
			 */
			if (!page->mapping) {
				unlock_page(page);
				page_cache_release(page);
				break;
			}
			/*
			 * page was already under io and is now done, great
			 */
			if (PageUptodate(page)) {
				unlock_page(page);
				goto fill_it;
			}

readpage:
			/*
			 * need to read in the page
			 */
			error = mapping->a_ops->readpage(in, page);

			if (unlikely(error)) {
				page_cache_release(page);
				if (error == AOP_TRUNCATED_PAGE)
					goto find_page;
				break;
			}
		}
fill_it:
		pages[i] = page;
	}

	/* queue whatever we collected, even if we stopped early */
	if (i)
		return move_to_pipe(pipe, pages, i, offset, len, flags);

	return error;
}
340
Jens Axboe83f91352006-04-02 23:05:09 +0200341/**
342 * generic_file_splice_read - splice data from file to a pipe
343 * @in: file to splice from
344 * @pipe: pipe to splice to
345 * @len: number of bytes to splice
346 * @flags: splice modifier flags
347 *
348 * Will read pages from given file and fill them into a pipe.
349 *
350 */
Ingo Molnar3a326a22006-04-10 15:18:35 +0200351ssize_t generic_file_splice_read(struct file *in, struct pipe_inode_info *pipe,
Jens Axboe5274f052006-03-30 15:15:30 +0200352 size_t len, unsigned int flags)
353{
354 ssize_t spliced;
355 int ret;
356
357 ret = 0;
358 spliced = 0;
Ingo Molnar3a326a22006-04-10 15:18:35 +0200359
Jens Axboe5274f052006-03-30 15:15:30 +0200360 while (len) {
Linus Torvalds29e35092006-04-02 12:46:35 -0700361 ret = __generic_file_splice_read(in, pipe, len, flags);
Jens Axboe5274f052006-03-30 15:15:30 +0200362
363 if (ret <= 0)
364 break;
365
366 in->f_pos += ret;
367 len -= ret;
368 spliced += ret;
Linus Torvalds29e35092006-04-02 12:46:35 -0700369
370 if (!(flags & SPLICE_F_NONBLOCK))
371 continue;
372 ret = -EAGAIN;
373 break;
Jens Axboe5274f052006-03-30 15:15:30 +0200374 }
375
376 if (spliced)
377 return spliced;
378
379 return ret;
380}
381
Jens Axboe059a8f32006-04-02 23:06:05 +0200382EXPORT_SYMBOL(generic_file_splice_read);
383
Jens Axboe5274f052006-03-30 15:15:30 +0200384/*
Jens Axboe4f6f0bd2006-04-02 23:04:46 +0200385 * Send 'sd->len' bytes to socket from 'sd->file' at position 'sd->pos'
386 * using sendpage().
Jens Axboe5274f052006-03-30 15:15:30 +0200387 */
388static int pipe_to_sendpage(struct pipe_inode_info *info,
389 struct pipe_buffer *buf, struct splice_desc *sd)
390{
391 struct file *file = sd->file;
392 loff_t pos = sd->pos;
393 unsigned int offset;
394 ssize_t ret;
395 void *ptr;
Jens Axboeb2b39fa2006-04-02 23:05:41 +0200396 int more;
Jens Axboe5274f052006-03-30 15:15:30 +0200397
398 /*
399 * sub-optimal, but we are limited by the pipe ->map. we don't
400 * need a kmap'ed buffer here, we just want to make sure we
401 * have the page pinned if the pipe page originates from the
402 * page cache
403 */
404 ptr = buf->ops->map(file, info, buf);
405 if (IS_ERR(ptr))
406 return PTR_ERR(ptr);
407
408 offset = pos & ~PAGE_CACHE_MASK;
Jens Axboeb2b39fa2006-04-02 23:05:41 +0200409 more = (sd->flags & SPLICE_F_MORE) || sd->len < sd->total_len;
Jens Axboe5274f052006-03-30 15:15:30 +0200410
Jens Axboeb2b39fa2006-04-02 23:05:41 +0200411 ret = file->f_op->sendpage(file, buf->page, offset, sd->len, &pos,more);
Jens Axboe5274f052006-03-30 15:15:30 +0200412
413 buf->ops->unmap(info, buf);
414 if (ret == sd->len)
415 return 0;
416
417 return -EIO;
418}
419
420/*
421 * This is a little more tricky than the file -> pipe splicing. There are
422 * basically three cases:
423 *
424 * - Destination page already exists in the address space and there
425 * are users of it. For that case we have no other option that
426 * copying the data. Tough luck.
427 * - Destination page already exists in the address space, but there
428 * are no users of it. Make sure it's uptodate, then drop it. Fall
429 * through to last case.
430 * - Destination page does not exist, we can add the pipe page to
431 * the page cache and avoid the copy.
432 *
Jens Axboe83f91352006-04-02 23:05:09 +0200433 * If asked to move pages to the output file (SPLICE_F_MOVE is set in
434 * sd->flags), we attempt to migrate pages from the pipe to the output
435 * file address space page cache. This is possible if no one else has
436 * the pipe page referenced outside of the pipe and page cache. If
437 * SPLICE_F_MOVE isn't set, or we cannot move the page, we simply create
438 * a new page in the output file page cache and fill/dirty that.
Jens Axboe5274f052006-03-30 15:15:30 +0200439 */
static int pipe_to_file(struct pipe_inode_info *info, struct pipe_buffer *buf,
			struct splice_desc *sd)
{
	struct file *file = sd->file;
	struct address_space *mapping = file->f_mapping;
	gfp_t gfp_mask = mapping_gfp_mask(mapping);
	unsigned int offset;
	struct page *page;
	pgoff_t index;
	char *src;
	int ret;

	/*
	 * make sure the data in this buffer is uptodate
	 */
	src = buf->ops->map(file, info, buf);
	if (IS_ERR(src))
		return PTR_ERR(src);

	index = sd->pos >> PAGE_CACHE_SHIFT;
	offset = sd->pos & ~PAGE_CACHE_MASK;

	/*
	 * reuse buf page, if SPLICE_F_MOVE is set
	 */
	if (sd->flags & SPLICE_F_MOVE) {
		/*
		 * If steal succeeds, buf->page is now pruned from the vm
		 * side (LRU and page cache) and we can reuse it.
		 */
		if (buf->ops->steal(info, buf))
			goto find_page;

		/*
		 * this will also set the page locked
		 */
		page = buf->page;
		if (add_to_page_cache(page, mapping, index, gfp_mask))
			goto find_page;

		/* steal only removed it from the LRU if it was on it */
		if (!(buf->flags & PIPE_BUF_FLAG_LRU))
			lru_cache_add(page);
	} else {
find_page:
		ret = -ENOMEM;
		page = find_or_create_page(mapping, index, gfp_mask);
		if (!page)
			goto out_nomem;

		/*
		 * If the page is uptodate, it is also locked. If it isn't
		 * uptodate, we can mark it uptodate if we are filling the
		 * full page. Otherwise we need to read it in first...
		 */
		if (!PageUptodate(page)) {
			if (sd->len < PAGE_CACHE_SIZE) {
				/*
				 * NOTE(review): readpage consumes the page
				 * lock; if it returns an error here, the
				 * unlock in the out: path may run on an
				 * already-unlocked page — verify per-fs
				 * readpage semantics.
				 */
				ret = mapping->a_ops->readpage(file, page);
				if (unlikely(ret))
					goto out;

				lock_page(page);

				if (!PageUptodate(page)) {
					/*
					 * page got invalidated, repeat
					 */
					if (!page->mapping) {
						unlock_page(page);
						page_cache_release(page);
						goto find_page;
					}
					ret = -EIO;
					goto out;
				}
			} else {
				/* full-page overwrite: no need to read */
				WARN_ON(!PageLocked(page));
				SetPageUptodate(page);
			}
		}
	}

	ret = mapping->a_ops->prepare_write(file, page, 0, sd->len);
	if (ret == AOP_TRUNCATED_PAGE) {
		page_cache_release(page);
		goto find_page;
	} else if (ret)
		goto out;

	/* stolen pages already contain the data; otherwise copy it in */
	if (!(buf->flags & PIPE_BUF_FLAG_STOLEN)) {
		char *dst = kmap_atomic(page, KM_USER0);

		memcpy(dst + offset, src + buf->offset, sd->len);
		flush_dcache_page(page);
		kunmap_atomic(dst, KM_USER0);
	}

	ret = mapping->a_ops->commit_write(file, page, 0, sd->len);
	if (ret == AOP_TRUNCATED_PAGE) {
		page_cache_release(page);
		goto find_page;
	} else if (ret)
		goto out;

	mark_page_accessed(page);
	balance_dirty_pages_ratelimited(mapping);
out:
	/*
	 * NOTE(review): the reference is dropped *before* unlock_page().
	 * This relies on the page-cache reference (truncate needs the page
	 * lock we hold) keeping the page alive; unlocking first would be
	 * more obviously safe — confirm against current mainline ordering.
	 */
	if (!(buf->flags & PIPE_BUF_FLAG_STOLEN)) {
		page_cache_release(page);
		unlock_page(page);
	}
out_nomem:
	buf->ops->unmap(info, buf);
	return ret;
}
554
/*
 * A splice actor consumes (part of) one pipe buffer on behalf of
 * move_from_pipe(); returns 0 on success or a negative errno.
 */
typedef int (splice_actor)(struct pipe_inode_info *, struct pipe_buffer *,
			   struct splice_desc *);
557
Jens Axboe83f91352006-04-02 23:05:09 +0200558/*
559 * Pipe input worker. Most of this logic works like a regular pipe, the
560 * key here is the 'actor' worker passed in that actually moves the data
561 * to the wanted destination. See pipe_to_file/pipe_to_sendpage above.
562 */
static ssize_t move_from_pipe(struct pipe_inode_info *pipe, struct file *out,
			      size_t len, unsigned int flags,
			      splice_actor *actor)
{
	int ret, do_wakeup, err;
	struct splice_desc sd;

	ret = 0;		/* bytes consumed, or first error */
	do_wakeup = 0;		/* writers need waking once we freed a slot */

	sd.total_len = len;
	sd.flags = flags;
	sd.file = out;
	sd.pos = out->f_pos;

	/* anonymous (internal) pipes have no inode and need no locking */
	if (pipe->inode)
		mutex_lock(&pipe->inode->i_mutex);

	for (;;) {
		if (pipe->nrbufs) {
			struct pipe_buffer *buf = pipe->bufs + pipe->curbuf;
			struct pipe_buf_operations *ops = buf->ops;

			/* never hand the actor more than it asked for */
			sd.len = buf->len;
			if (sd.len > sd.total_len)
				sd.len = sd.total_len;

			err = actor(pipe, buf, &sd);
			if (err) {
				/* -ENODATA means a benign 0-byte splice */
				if (!ret && err != -ENODATA)
					ret = err;

				break;
			}

			ret += sd.len;
			buf->offset += sd.len;
			buf->len -= sd.len;
			/* buffer fully consumed: retire it and advance */
			if (!buf->len) {
				buf->ops = NULL;
				ops->release(pipe, buf);
				pipe->curbuf = (pipe->curbuf + 1) & (PIPE_BUFFERS - 1);
				pipe->nrbufs--;
				if (pipe->inode)
					do_wakeup = 1;
			}

			sd.pos += sd.len;
			sd.total_len -= sd.len;
			if (!sd.total_len)
				break;
		}

		if (pipe->nrbufs)
			continue;
		/* empty pipe with no writers: we're done */
		if (!pipe->writers)
			break;
		/* no writer is about to refill it; stop if we made progress */
		if (!pipe->waiting_writers) {
			if (ret)
				break;
		}

		if (flags & SPLICE_F_NONBLOCK) {
			if (!ret)
				ret = -EAGAIN;
			break;
		}

		if (signal_pending(current)) {
			if (!ret)
				ret = -ERESTARTSYS;
			break;
		}

		/* wake pending writers before we sleep on an empty pipe */
		if (do_wakeup) {
			smp_mb();
			if (waitqueue_active(&pipe->wait))
				wake_up_interruptible_sync(&pipe->wait);
			kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
			do_wakeup = 0;
		}

		pipe_wait(pipe);
	}

	if (pipe->inode)
		mutex_unlock(&pipe->inode->i_mutex);

	/* final writer wakeup, outside the pipe lock */
	if (do_wakeup) {
		smp_mb();
		if (waitqueue_active(&pipe->wait))
			wake_up_interruptible(&pipe->wait);
		kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
	}

	out->f_pos = sd.pos;
	return ret;

}
662
Jens Axboe83f91352006-04-02 23:05:09 +0200663/**
664 * generic_file_splice_write - splice data from a pipe to a file
Ingo Molnar3a326a22006-04-10 15:18:35 +0200665 * @pipe: pipe info
Jens Axboe83f91352006-04-02 23:05:09 +0200666 * @out: file to write to
667 * @len: number of bytes to splice
668 * @flags: splice modifier flags
669 *
670 * Will either move or copy pages (determined by @flags options) from
671 * the given pipe inode to the given file.
672 *
673 */
Ingo Molnar3a326a22006-04-10 15:18:35 +0200674ssize_t
675generic_file_splice_write(struct pipe_inode_info *pipe, struct file *out,
676 size_t len, unsigned int flags)
Jens Axboe5274f052006-03-30 15:15:30 +0200677{
Jens Axboe4f6f0bd2006-04-02 23:04:46 +0200678 struct address_space *mapping = out->f_mapping;
Ingo Molnar3a326a22006-04-10 15:18:35 +0200679 ssize_t ret;
680
681 ret = move_from_pipe(pipe, out, len, flags, pipe_to_file);
Jens Axboe4f6f0bd2006-04-02 23:04:46 +0200682
683 /*
684 * if file or inode is SYNC and we actually wrote some data, sync it
685 */
686 if (unlikely((out->f_flags & O_SYNC) || IS_SYNC(mapping->host))
687 && ret > 0) {
688 struct inode *inode = mapping->host;
689 int err;
690
691 mutex_lock(&inode->i_mutex);
692 err = generic_osync_inode(mapping->host, mapping,
693 OSYNC_METADATA|OSYNC_DATA);
694 mutex_unlock(&inode->i_mutex);
695
696 if (err)
697 ret = err;
698 }
699
700 return ret;
Jens Axboe5274f052006-03-30 15:15:30 +0200701}
702
Jens Axboe059a8f32006-04-02 23:06:05 +0200703EXPORT_SYMBOL(generic_file_splice_write);
704
Jens Axboe83f91352006-04-02 23:05:09 +0200705/**
706 * generic_splice_sendpage - splice data from a pipe to a socket
707 * @inode: pipe inode
708 * @out: socket to write to
709 * @len: number of bytes to splice
710 * @flags: splice modifier flags
711 *
712 * Will send @len bytes from the pipe to a network socket. No data copying
713 * is involved.
714 *
715 */
Ingo Molnar3a326a22006-04-10 15:18:35 +0200716ssize_t generic_splice_sendpage(struct pipe_inode_info *pipe, struct file *out,
Jens Axboe5274f052006-03-30 15:15:30 +0200717 size_t len, unsigned int flags)
718{
Ingo Molnar3a326a22006-04-10 15:18:35 +0200719 return move_from_pipe(pipe, out, len, flags, pipe_to_sendpage);
Jens Axboe5274f052006-03-30 15:15:30 +0200720}
721
Jens Axboe059a8f32006-04-02 23:06:05 +0200722EXPORT_SYMBOL(generic_splice_sendpage);
Jeff Garzika0f06782006-03-30 23:06:13 -0500723
Jens Axboe83f91352006-04-02 23:05:09 +0200724/*
725 * Attempt to initiate a splice from pipe to file.
726 */
Ingo Molnar3a326a22006-04-10 15:18:35 +0200727static long do_splice_from(struct pipe_inode_info *pipe, struct file *out,
Jens Axboeb92ce552006-04-11 13:52:07 +0200728 size_t len, unsigned int flags)
Jens Axboe5274f052006-03-30 15:15:30 +0200729{
730 loff_t pos;
731 int ret;
732
733 if (!out->f_op || !out->f_op->splice_write)
734 return -EINVAL;
735
736 if (!(out->f_mode & FMODE_WRITE))
737 return -EBADF;
738
739 pos = out->f_pos;
Ingo Molnar529565d2006-04-10 15:18:58 +0200740
Jens Axboe5274f052006-03-30 15:15:30 +0200741 ret = rw_verify_area(WRITE, out, &pos, len);
742 if (unlikely(ret < 0))
743 return ret;
744
745 return out->f_op->splice_write(pipe, out, len, flags);
746}
747
Jens Axboe83f91352006-04-02 23:05:09 +0200748/*
749 * Attempt to initiate a splice from a file to a pipe.
750 */
Jens Axboeb92ce552006-04-11 13:52:07 +0200751static long do_splice_to(struct file *in, struct pipe_inode_info *pipe,
752 size_t len, unsigned int flags)
Jens Axboe5274f052006-03-30 15:15:30 +0200753{
754 loff_t pos, isize, left;
755 int ret;
756
757 if (!in->f_op || !in->f_op->splice_read)
758 return -EINVAL;
759
760 if (!(in->f_mode & FMODE_READ))
761 return -EBADF;
762
763 pos = in->f_pos;
Ingo Molnar529565d2006-04-10 15:18:58 +0200764
Jens Axboe5274f052006-03-30 15:15:30 +0200765 ret = rw_verify_area(READ, in, &pos, len);
766 if (unlikely(ret < 0))
767 return ret;
768
769 isize = i_size_read(in->f_mapping->host);
770 if (unlikely(in->f_pos >= isize))
771 return 0;
772
773 left = isize - in->f_pos;
774 if (left < len)
775 len = left;
776
777 return in->f_op->splice_read(in, pipe, len, flags);
778}
779
/*
 * Splice data directly from 'in' to 'out' through a per-task internal
 * pipe, chunking the transfer at PIPE_BUFFERS pages at a time. Returns
 * the number of bytes moved, or a negative errno if nothing was moved.
 */
long do_splice_direct(struct file *in, struct file *out, size_t len,
		      unsigned int flags)
{
	struct pipe_inode_info *pipe;
	long ret, bytes;
	umode_t i_mode;
	int i;

	/*
	 * We require the input being a regular file, as we don't want to
	 * randomly drop data for eg socket -> socket splicing. Use the
	 * piped splicing for that!
	 */
	i_mode = in->f_dentry->d_inode->i_mode;
	if (unlikely(!S_ISREG(i_mode) && !S_ISBLK(i_mode)))
		return -EINVAL;

	/*
	 * neither in nor out is a pipe, setup an internal pipe attached to
	 * 'out' and transfer the wanted data from 'in' to 'out' through that
	 */
	pipe = current->splice_pipe;
	if (!pipe) {
		/* lazily allocated once, then cached on the task */
		pipe = alloc_pipe_info(NULL);
		if (!pipe)
			return -ENOMEM;

		/*
		 * We don't have an immediate reader, but we'll read the stuff
		 * out of the pipe right after the move_to_pipe(). So set
		 * PIPE_READERS appropriately.
		 */
		pipe->readers = 1;

		current->splice_pipe = pipe;
	}

	/*
	 * do the splice
	 */
	ret = 0;
	bytes = 0;

	while (len) {
		size_t read_len, max_read_len;

		/*
		 * Do at most PIPE_BUFFERS pages worth of transfer:
		 */
		max_read_len = min(len, (size_t)(PIPE_BUFFERS*PAGE_SIZE));

		ret = do_splice_to(in, pipe, max_read_len, flags);
		if (unlikely(ret < 0))
			goto out_release;

		read_len = ret;

		/*
		 * NOTE: nonblocking mode only applies to the input. We
		 * must not do the output in nonblocking mode as then we
		 * could get stuck data in the internal pipe:
		 */
		ret = do_splice_from(pipe, out, read_len,
				     flags & ~SPLICE_F_NONBLOCK);
		if (unlikely(ret < 0))
			goto out_release;

		bytes += ret;
		len -= ret;

		/*
		 * In nonblocking mode, if we got back a short read then
		 * that was due to either an IO error or due to the
		 * pagecache entry not being there. In the IO error case
		 * the _next_ splice attempt will produce a clean IO error
		 * return value (not a short read), so in both cases it's
		 * correct to break out of the loop here:
		 */
		if ((flags & SPLICE_F_NONBLOCK) && (read_len < max_read_len))
			break;
	}

	/* the internal pipe must be drained before the next user */
	pipe->nrbufs = pipe->curbuf = 0;

	return bytes;

out_release:
	/*
	 * If we did an incomplete transfer we must release
	 * the pipe buffers in question:
	 */
	for (i = 0; i < PIPE_BUFFERS; i++) {
		struct pipe_buffer *buf = pipe->bufs + i;

		if (buf->ops) {
			buf->ops->release(pipe, buf);
			buf->ops = NULL;
		}
	}
	pipe->nrbufs = pipe->curbuf = 0;

	/*
	 * If we transferred some data, return the number of bytes:
	 */
	if (bytes > 0)
		return bytes;

	return ret;
}
889
890EXPORT_SYMBOL(do_splice_direct);
891
Jens Axboe83f91352006-04-02 23:05:09 +0200892/*
893 * Determine where to splice to/from.
894 */
Ingo Molnar529565d2006-04-10 15:18:58 +0200895static long do_splice(struct file *in, loff_t __user *off_in,
896 struct file *out, loff_t __user *off_out,
897 size_t len, unsigned int flags)
Jens Axboe5274f052006-03-30 15:15:30 +0200898{
Ingo Molnar3a326a22006-04-10 15:18:35 +0200899 struct pipe_inode_info *pipe;
Jens Axboe5274f052006-03-30 15:15:30 +0200900
Ingo Molnar3a326a22006-04-10 15:18:35 +0200901 pipe = in->f_dentry->d_inode->i_pipe;
Ingo Molnar529565d2006-04-10 15:18:58 +0200902 if (pipe) {
903 if (off_in)
904 return -ESPIPE;
Jens Axboeb92ce552006-04-11 13:52:07 +0200905 if (off_out) {
906 if (out->f_op->llseek == no_llseek)
907 return -EINVAL;
908 if (copy_from_user(&out->f_pos, off_out,
909 sizeof(loff_t)))
910 return -EFAULT;
911 }
Ingo Molnar529565d2006-04-10 15:18:58 +0200912
Jens Axboeb92ce552006-04-11 13:52:07 +0200913 return do_splice_from(pipe, out, len, flags);
Ingo Molnar529565d2006-04-10 15:18:58 +0200914 }
Jens Axboe5274f052006-03-30 15:15:30 +0200915
Ingo Molnar3a326a22006-04-10 15:18:35 +0200916 pipe = out->f_dentry->d_inode->i_pipe;
Ingo Molnar529565d2006-04-10 15:18:58 +0200917 if (pipe) {
918 if (off_out)
919 return -ESPIPE;
Jens Axboeb92ce552006-04-11 13:52:07 +0200920 if (off_in) {
921 if (in->f_op->llseek == no_llseek)
922 return -EINVAL;
923 if (copy_from_user(&in->f_pos, off_in, sizeof(loff_t)))
924 return -EFAULT;
925 }
Ingo Molnar529565d2006-04-10 15:18:58 +0200926
Jens Axboeb92ce552006-04-11 13:52:07 +0200927 return do_splice_to(in, pipe, len, flags);
Ingo Molnar529565d2006-04-10 15:18:58 +0200928 }
Jens Axboe5274f052006-03-30 15:15:30 +0200929
930 return -EINVAL;
931}
932
Ingo Molnar529565d2006-04-10 15:18:58 +0200933asmlinkage long sys_splice(int fd_in, loff_t __user *off_in,
934 int fd_out, loff_t __user *off_out,
935 size_t len, unsigned int flags)
Jens Axboe5274f052006-03-30 15:15:30 +0200936{
937 long error;
938 struct file *in, *out;
939 int fput_in, fput_out;
940
941 if (unlikely(!len))
942 return 0;
943
944 error = -EBADF;
Ingo Molnar529565d2006-04-10 15:18:58 +0200945 in = fget_light(fd_in, &fput_in);
Jens Axboe5274f052006-03-30 15:15:30 +0200946 if (in) {
947 if (in->f_mode & FMODE_READ) {
Ingo Molnar529565d2006-04-10 15:18:58 +0200948 out = fget_light(fd_out, &fput_out);
Jens Axboe5274f052006-03-30 15:15:30 +0200949 if (out) {
950 if (out->f_mode & FMODE_WRITE)
Ingo Molnar529565d2006-04-10 15:18:58 +0200951 error = do_splice(in, off_in,
952 out, off_out,
953 len, flags);
Jens Axboe5274f052006-03-30 15:15:30 +0200954 fput_light(out, fput_out);
955 }
956 }
957
958 fput_light(in, fput_in);
959 }
960
961 return error;
962}