blob: e30743c2c06a87810367015c4ea861efb119acf6 [file] [log] [blame]
/*
2 * "splice": joining two ropes together by interweaving their strands.
3 *
4 * This is the "extended pipe" functionality, where a pipe is used as
5 * an arbitrary in-memory buffer. Think of a pipe as a small kernel
6 * buffer that you can use to transfer data from one end to the other.
7 *
8 * The traditional unix read/write is extended with a "splice()" operation
9 * that transfers data buffers to or from a pipe buffer.
10 *
11 * Named by Larry McVoy, original implementation from Linus, extended by
12 * Jens to support splicing to files and fixing the initial implementation
13 * bugs.
14 *
15 * Copyright (C) 2005 Jens Axboe <axboe@suse.de>
16 * Copyright (C) 2005 Linus Torvalds <torvalds@osdl.org>
17 *
18 */
19#include <linux/fs.h>
20#include <linux/file.h>
21#include <linux/pagemap.h>
22#include <linux/pipe_fs_i.h>
23#include <linux/mm_inline.h>
Jens Axboe5abc97a2006-03-30 15:16:46 +020024#include <linux/swap.h>
Jens Axboe4f6f0bd2006-04-02 23:04:46 +020025#include <linux/writeback.h>
26#include <linux/buffer_head.h>
Jeff Garzika0f06782006-03-30 23:06:13 -050027#include <linux/module.h>
Jens Axboe4f6f0bd2006-04-02 23:04:46 +020028#include <linux/syscalls.h>
Jens Axboe5274f052006-03-30 15:15:30 +020029
/*
 * Passed to the actors (pipe_to_file, pipe_to_sendpage) to describe the
 * destination file and track progress across one move_from_pipe() run.
 */
struct splice_desc {
	unsigned int len, total_len;	/* current and remaining length */
	unsigned int flags;		/* splice flags (SPLICE_F_*) */
	struct file *file;		/* file to read/write */
	loff_t pos;			/* file position */
};
39
/*
 * Attempt to steal a page from a pipe buffer. This should perhaps go into
 * a vm helper function, it's already simplified quite a bit by the
 * addition of remove_mapping(). If success is returned, the caller may
 * attempt to reuse this page for another destination.
 *
 * Returns 0 on success (page unhashed from its mapping, STOLEN/LRU flags
 * set on the buffer), 1 if the page could not be stolen.
 */
static int page_cache_pipe_buf_steal(struct pipe_inode_info *info,
				     struct pipe_buffer *buf)
{
	struct page *page = buf->page;
	struct address_space *mapping = page_mapping(page);

	/* caller must hand us the page locked and uptodate */
	WARN_ON(!PageLocked(page));
	WARN_ON(!PageUptodate(page));

	/*
	 * At least for ext2 with nobh option, we need to wait on writeback
	 * completing on this page, since we'll remove it from the pagecache.
	 * Otherwise truncate wont wait on the page, allowing the disk
	 * blocks to be reused by someone else before we actually wrote our
	 * data to them. fs corruption ensues.
	 */
	wait_on_page_writeback(page);

	/* drop any fs-private state (e.g. buffer heads) before unhashing */
	if (PagePrivate(page))
		try_to_release_page(page, mapping_gfp_mask(mapping));

	/* remove_mapping() returns non-zero on success */
	if (!remove_mapping(mapping, page))
		return 1;

	buf->flags |= PIPE_BUF_FLAG_STOLEN | PIPE_BUF_FLAG_LRU;
	return 0;
}
73
Jens Axboe5274f052006-03-30 15:15:30 +020074static void page_cache_pipe_buf_release(struct pipe_inode_info *info,
75 struct pipe_buffer *buf)
76{
77 page_cache_release(buf->page);
78 buf->page = NULL;
Jens Axboe3e7ee3e2006-04-02 23:11:04 +020079 buf->flags &= ~(PIPE_BUF_FLAG_STOLEN | PIPE_BUF_FLAG_LRU);
Jens Axboe5274f052006-03-30 15:15:30 +020080}
81
/*
 * Map the pipe buffer's page for kernel access, making sure its contents
 * are valid first. Returns a kmap'ed address on success, or an ERR_PTR
 * (-ENODATA if the page was truncated, -EIO on read error).
 */
static void *page_cache_pipe_buf_map(struct file *file,
				     struct pipe_inode_info *info,
				     struct pipe_buffer *buf)
{
	struct page *page = buf->page;
	int err;

	if (!PageUptodate(page)) {
		/* lock to stabilize ->mapping and the uptodate state */
		lock_page(page);

		/*
		 * Page got truncated/unhashed. This will cause a 0-byte
		 * splice, if this is the first page
		 */
		if (!page->mapping) {
			err = -ENODATA;
			goto error;
		}

		/*
		 * uh oh, read-error from disk
		 */
		if (!PageUptodate(page)) {
			err = -EIO;
			goto error;
		}

		/*
		 * page is ok afterall, fall through to mapping
		 */
		unlock_page(page);
	}

	return kmap(page);
error:
	unlock_page(page);
	return ERR_PTR(err);
}
120
/*
 * Undo the kmap() taken by page_cache_pipe_buf_map().
 */
static void page_cache_pipe_buf_unmap(struct pipe_inode_info *info,
				      struct pipe_buffer *buf)
{
	kunmap(buf->page);
}
126
/*
 * Buffer operations for pipe buffers whose pages come from (or go to)
 * the page cache. can_merge is 0: each page stands alone, appends must
 * not be merged into an existing buffer.
 */
static struct pipe_buf_operations page_cache_pipe_buf_ops = {
	.can_merge = 0,
	.map = page_cache_pipe_buf_map,
	.unmap = page_cache_pipe_buf_unmap,
	.release = page_cache_pipe_buf_release,
	.steal = page_cache_pipe_buf_steal,
};
134
/*
 * Pipe output worker. This sets up our pipe format with the page cache
 * pipe buffer operations. Otherwise very similar to the regular pipe_writev().
 *
 * Consumes page references from @pages as buffers are attached; any pages
 * not consumed (pipe full, error) are released before returning. Returns
 * the number of bytes queued, or a negative errno if nothing was queued.
 */
static ssize_t move_to_pipe(struct pipe_inode_info *pipe, struct page **pages,
			    int nr_pages, unsigned long offset,
			    unsigned long len, unsigned int flags)
{
	int ret, do_wakeup, i;

	ret = 0;
	do_wakeup = 0;
	i = 0;

	/* anonymous (internal) pipes have no inode, hence no mutex */
	if (pipe->inode)
		mutex_lock(&pipe->inode->i_mutex);

	for (;;) {
		int bufs;

		/* no readers left: raise SIGPIPE like a regular pipe write */
		if (!pipe->readers) {
			send_sig(SIGPIPE, current, 0);
			if (!ret)
				ret = -EPIPE;
			break;
		}

		bufs = pipe->nrbufs;
		if (bufs < PIPE_BUFFERS) {
			/* next free slot in the circular buffer array */
			int newbuf = (pipe->curbuf + bufs) & (PIPE_BUFFERS - 1);
			struct pipe_buffer *buf = pipe->bufs + newbuf;
			struct page *page = pages[i++];
			unsigned long this_len;

			/* only the first page can start at a non-zero offset */
			this_len = PAGE_CACHE_SIZE - offset;
			if (this_len > len)
				this_len = len;

			buf->page = page;
			buf->offset = offset;
			buf->len = this_len;
			buf->ops = &page_cache_pipe_buf_ops;
			pipe->nrbufs = ++bufs;
			do_wakeup = 1;

			ret += this_len;
			len -= this_len;
			offset = 0;
			if (!--nr_pages)
				break;
			if (!len)
				break;
			if (bufs < PIPE_BUFFERS)
				continue;

			break;
		}

		/* pipe full: bail out rather than sleep in nonblock mode */
		if (flags & SPLICE_F_NONBLOCK) {
			if (!ret)
				ret = -EAGAIN;
			break;
		}

		if (signal_pending(current)) {
			if (!ret)
				ret = -ERESTARTSYS;
			break;
		}

		/* wake readers before sleeping so they can drain the pipe */
		if (do_wakeup) {
			smp_mb();
			if (waitqueue_active(&pipe->wait))
				wake_up_interruptible_sync(&pipe->wait);
			kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
			do_wakeup = 0;
		}

		pipe->waiting_writers++;
		pipe_wait(pipe);
		pipe->waiting_writers--;
	}

	if (pipe->inode)
		mutex_unlock(&pipe->inode->i_mutex);

	/* final reader wakeup, outside the pipe mutex */
	if (do_wakeup) {
		smp_mb();
		if (waitqueue_active(&pipe->wait))
			wake_up_interruptible(&pipe->wait);
		kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
	}

	/* release references on pages we never attached to the pipe */
	while (i < nr_pages)
		page_cache_release(pages[i++]);

	return ret;
}
233
/*
 * Read up to @len bytes worth of pages from @in's page cache (at the
 * current f_pos, reading from disk as needed) and feed them to @pipe.
 * Returns bytes queued via move_to_pipe(), or 0/-errno if no page was
 * successfully prepared.
 */
static int
__generic_file_splice_read(struct file *in, struct pipe_inode_info *pipe,
			   size_t len, unsigned int flags)
{
	struct address_space *mapping = in->f_mapping;
	unsigned int offset, nr_pages;
	struct page *pages[PIPE_BUFFERS];
	struct page *page;
	pgoff_t index;
	int i, error;

	/* split f_pos into a page index and an intra-page offset */
	index = in->f_pos >> PAGE_CACHE_SHIFT;
	offset = in->f_pos & ~PAGE_CACHE_MASK;
	nr_pages = (len + offset + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;

	/* one pipe-load at most per call */
	if (nr_pages > PIPE_BUFFERS)
		nr_pages = PIPE_BUFFERS;

	/*
	 * initiate read-ahead on this page range. however, don't call into
	 * read-ahead if this is a non-zero offset (we are likely doing small
	 * chunk splice and the page is already there) for a single page.
	 */
	if (!offset || nr_pages > 1)
		do_page_cache_readahead(mapping, in, index, nr_pages);

	/*
	 * now fill in the holes
	 */
	error = 0;
	for (i = 0; i < nr_pages; i++, index++) {
find_page:
		/*
		 * lookup the page for this index
		 */
		page = find_get_page(mapping, index);
		if (!page) {
			/*
			 * If in nonblock mode then dont block on
			 * readpage (we've kicked readahead so there
			 * will be asynchronous progress):
			 *
			 * NOTE(review): breaking here with i == 0 makes
			 * the function return 0, not -EAGAIN — confirm
			 * callers treat that as "try again".
			 */
			if (flags & SPLICE_F_NONBLOCK)
				break;

			/*
			 * page didn't exist, allocate one
			 */
			page = page_cache_alloc_cold(mapping);
			if (!page)
				break;

			error = add_to_page_cache_lru(page, mapping, index,
						mapping_gfp_mask(mapping));
			if (unlikely(error)) {
				page_cache_release(page);
				break;
			}

			goto readpage;
		}

		/*
		 * If the page isn't uptodate, we may need to start io on it
		 */
		if (!PageUptodate(page)) {
			lock_page(page);

			/*
			 * page was truncated, stop here. if this isn't the
			 * first page, we'll just complete what we already
			 * added
			 */
			if (!page->mapping) {
				unlock_page(page);
				page_cache_release(page);
				break;
			}
			/*
			 * page was already under io and is now done, great
			 */
			if (PageUptodate(page)) {
				unlock_page(page);
				goto fill_it;
			}

readpage:
			/*
			 * need to read in the page
			 */
			error = mapping->a_ops->readpage(in, page);

			if (unlikely(error)) {
				page_cache_release(page);
				if (error == AOP_TRUNCATED_PAGE)
					goto find_page;
				break;
			}
		}
fill_it:
		pages[i] = page;
	}

	/* hand whatever we gathered (possibly a partial run) to the pipe */
	if (i)
		return move_to_pipe(pipe, pages, i, offset, len, flags);

	return error;
}
342
Jens Axboe83f91352006-04-02 23:05:09 +0200343/**
344 * generic_file_splice_read - splice data from file to a pipe
345 * @in: file to splice from
346 * @pipe: pipe to splice to
347 * @len: number of bytes to splice
348 * @flags: splice modifier flags
349 *
350 * Will read pages from given file and fill them into a pipe.
351 *
352 */
Ingo Molnar3a326a22006-04-10 15:18:35 +0200353ssize_t generic_file_splice_read(struct file *in, struct pipe_inode_info *pipe,
Jens Axboe5274f052006-03-30 15:15:30 +0200354 size_t len, unsigned int flags)
355{
356 ssize_t spliced;
357 int ret;
358
359 ret = 0;
360 spliced = 0;
Ingo Molnar3a326a22006-04-10 15:18:35 +0200361
Jens Axboe5274f052006-03-30 15:15:30 +0200362 while (len) {
Linus Torvalds29e35092006-04-02 12:46:35 -0700363 ret = __generic_file_splice_read(in, pipe, len, flags);
Jens Axboe5274f052006-03-30 15:15:30 +0200364
365 if (ret <= 0)
366 break;
367
368 in->f_pos += ret;
369 len -= ret;
370 spliced += ret;
Linus Torvalds29e35092006-04-02 12:46:35 -0700371
372 if (!(flags & SPLICE_F_NONBLOCK))
373 continue;
374 ret = -EAGAIN;
375 break;
Jens Axboe5274f052006-03-30 15:15:30 +0200376 }
377
378 if (spliced)
379 return spliced;
380
381 return ret;
382}
383
Jens Axboe059a8f32006-04-02 23:06:05 +0200384EXPORT_SYMBOL(generic_file_splice_read);
385
Jens Axboe5274f052006-03-30 15:15:30 +0200386/*
Jens Axboe4f6f0bd2006-04-02 23:04:46 +0200387 * Send 'sd->len' bytes to socket from 'sd->file' at position 'sd->pos'
388 * using sendpage().
Jens Axboe5274f052006-03-30 15:15:30 +0200389 */
390static int pipe_to_sendpage(struct pipe_inode_info *info,
391 struct pipe_buffer *buf, struct splice_desc *sd)
392{
393 struct file *file = sd->file;
394 loff_t pos = sd->pos;
395 unsigned int offset;
396 ssize_t ret;
397 void *ptr;
Jens Axboeb2b39fa2006-04-02 23:05:41 +0200398 int more;
Jens Axboe5274f052006-03-30 15:15:30 +0200399
400 /*
401 * sub-optimal, but we are limited by the pipe ->map. we don't
402 * need a kmap'ed buffer here, we just want to make sure we
403 * have the page pinned if the pipe page originates from the
404 * page cache
405 */
406 ptr = buf->ops->map(file, info, buf);
407 if (IS_ERR(ptr))
408 return PTR_ERR(ptr);
409
410 offset = pos & ~PAGE_CACHE_MASK;
Jens Axboeb2b39fa2006-04-02 23:05:41 +0200411 more = (sd->flags & SPLICE_F_MORE) || sd->len < sd->total_len;
Jens Axboe5274f052006-03-30 15:15:30 +0200412
Jens Axboeb2b39fa2006-04-02 23:05:41 +0200413 ret = file->f_op->sendpage(file, buf->page, offset, sd->len, &pos,more);
Jens Axboe5274f052006-03-30 15:15:30 +0200414
415 buf->ops->unmap(info, buf);
416 if (ret == sd->len)
417 return 0;
418
419 return -EIO;
420}
421
422/*
423 * This is a little more tricky than the file -> pipe splicing. There are
424 * basically three cases:
425 *
426 * - Destination page already exists in the address space and there
427 * are users of it. For that case we have no other option that
428 * copying the data. Tough luck.
429 * - Destination page already exists in the address space, but there
430 * are no users of it. Make sure it's uptodate, then drop it. Fall
431 * through to last case.
432 * - Destination page does not exist, we can add the pipe page to
433 * the page cache and avoid the copy.
434 *
Jens Axboe83f91352006-04-02 23:05:09 +0200435 * If asked to move pages to the output file (SPLICE_F_MOVE is set in
436 * sd->flags), we attempt to migrate pages from the pipe to the output
437 * file address space page cache. This is possible if no one else has
438 * the pipe page referenced outside of the pipe and page cache. If
439 * SPLICE_F_MOVE isn't set, or we cannot move the page, we simply create
440 * a new page in the output file page cache and fill/dirty that.
Jens Axboe5274f052006-03-30 15:15:30 +0200441 */
442static int pipe_to_file(struct pipe_inode_info *info, struct pipe_buffer *buf,
443 struct splice_desc *sd)
444{
445 struct file *file = sd->file;
446 struct address_space *mapping = file->f_mapping;
Jens Axboe3e7ee3e2006-04-02 23:11:04 +0200447 gfp_t gfp_mask = mapping_gfp_mask(mapping);
Jens Axboe5274f052006-03-30 15:15:30 +0200448 unsigned int offset;
449 struct page *page;
Jens Axboe5274f052006-03-30 15:15:30 +0200450 pgoff_t index;
Jens Axboe5abc97a2006-03-30 15:16:46 +0200451 char *src;
Jens Axboe3e7ee3e2006-04-02 23:11:04 +0200452 int ret;
Jens Axboe5274f052006-03-30 15:15:30 +0200453
454 /*
Jens Axboe49d0b212006-04-10 09:04:41 +0200455 * make sure the data in this buffer is uptodate
Jens Axboe5274f052006-03-30 15:15:30 +0200456 */
457 src = buf->ops->map(file, info, buf);
458 if (IS_ERR(src))
459 return PTR_ERR(src);
460
461 index = sd->pos >> PAGE_CACHE_SHIFT;
462 offset = sd->pos & ~PAGE_CACHE_MASK;
463
Jens Axboe5274f052006-03-30 15:15:30 +0200464 /*
Jens Axboe5abc97a2006-03-30 15:16:46 +0200465 * reuse buf page, if SPLICE_F_MOVE is set
Jens Axboe5274f052006-03-30 15:15:30 +0200466 */
Jens Axboe5abc97a2006-03-30 15:16:46 +0200467 if (sd->flags & SPLICE_F_MOVE) {
Jens Axboe83f91352006-04-02 23:05:09 +0200468 /*
469 * If steal succeeds, buf->page is now pruned from the vm
470 * side (LRU and page cache) and we can reuse it.
471 */
Jens Axboe5abc97a2006-03-30 15:16:46 +0200472 if (buf->ops->steal(info, buf))
473 goto find_page;
Jens Axboe5274f052006-03-30 15:15:30 +0200474
Jens Axboe49d0b212006-04-10 09:04:41 +0200475 /*
476 * this will also set the page locked
477 */
Jens Axboe5abc97a2006-03-30 15:16:46 +0200478 page = buf->page;
Jens Axboe3e7ee3e2006-04-02 23:11:04 +0200479 if (add_to_page_cache(page, mapping, index, gfp_mask))
Jens Axboe5abc97a2006-03-30 15:16:46 +0200480 goto find_page;
Jens Axboe3e7ee3e2006-04-02 23:11:04 +0200481
482 if (!(buf->flags & PIPE_BUF_FLAG_LRU))
483 lru_cache_add(page);
Jens Axboe5abc97a2006-03-30 15:16:46 +0200484 } else {
485find_page:
486 ret = -ENOMEM;
Jens Axboe3e7ee3e2006-04-02 23:11:04 +0200487 page = find_or_create_page(mapping, index, gfp_mask);
Jens Axboe5abc97a2006-03-30 15:16:46 +0200488 if (!page)
Dave Jones9aefe432006-04-10 09:02:40 +0200489 goto out_nomem;
Jens Axboe5274f052006-03-30 15:15:30 +0200490
Jens Axboe5abc97a2006-03-30 15:16:46 +0200491 /*
492 * If the page is uptodate, it is also locked. If it isn't
493 * uptodate, we can mark it uptodate if we are filling the
494 * full page. Otherwise we need to read it in first...
495 */
496 if (!PageUptodate(page)) {
497 if (sd->len < PAGE_CACHE_SIZE) {
498 ret = mapping->a_ops->readpage(file, page);
499 if (unlikely(ret))
500 goto out;
501
502 lock_page(page);
503
504 if (!PageUptodate(page)) {
505 /*
506 * page got invalidated, repeat
507 */
508 if (!page->mapping) {
509 unlock_page(page);
510 page_cache_release(page);
511 goto find_page;
512 }
513 ret = -EIO;
514 goto out;
Jens Axboe5274f052006-03-30 15:15:30 +0200515 }
Jens Axboe5abc97a2006-03-30 15:16:46 +0200516 } else {
517 WARN_ON(!PageLocked(page));
518 SetPageUptodate(page);
Jens Axboe5274f052006-03-30 15:15:30 +0200519 }
Jens Axboe5274f052006-03-30 15:15:30 +0200520 }
521 }
522
523 ret = mapping->a_ops->prepare_write(file, page, 0, sd->len);
Jens Axboe4f6f0bd2006-04-02 23:04:46 +0200524 if (ret == AOP_TRUNCATED_PAGE) {
525 page_cache_release(page);
526 goto find_page;
527 } else if (ret)
Jens Axboe5274f052006-03-30 15:15:30 +0200528 goto out;
529
Jens Axboe3e7ee3e2006-04-02 23:11:04 +0200530 if (!(buf->flags & PIPE_BUF_FLAG_STOLEN)) {
Jens Axboe5abc97a2006-03-30 15:16:46 +0200531 char *dst = kmap_atomic(page, KM_USER0);
532
533 memcpy(dst + offset, src + buf->offset, sd->len);
534 flush_dcache_page(page);
535 kunmap_atomic(dst, KM_USER0);
536 }
Jens Axboe5274f052006-03-30 15:15:30 +0200537
538 ret = mapping->a_ops->commit_write(file, page, 0, sd->len);
Jens Axboe4f6f0bd2006-04-02 23:04:46 +0200539 if (ret == AOP_TRUNCATED_PAGE) {
540 page_cache_release(page);
541 goto find_page;
542 } else if (ret)
Jens Axboe5274f052006-03-30 15:15:30 +0200543 goto out;
544
Jens Axboec7f21e42006-04-10 09:01:01 +0200545 mark_page_accessed(page);
Jens Axboe4f6f0bd2006-04-02 23:04:46 +0200546 balance_dirty_pages_ratelimited(mapping);
Jens Axboe5274f052006-03-30 15:15:30 +0200547out:
Jens Axboe3e7ee3e2006-04-02 23:11:04 +0200548 if (!(buf->flags & PIPE_BUF_FLAG_STOLEN)) {
Jens Axboe5abc97a2006-03-30 15:16:46 +0200549 page_cache_release(page);
Jens Axboe4f6f0bd2006-04-02 23:04:46 +0200550 unlock_page(page);
551 }
Dave Jones9aefe432006-04-10 09:02:40 +0200552out_nomem:
Jens Axboe5274f052006-03-30 15:15:30 +0200553 buf->ops->unmap(info, buf);
554 return ret;
555}
556
557typedef int (splice_actor)(struct pipe_inode_info *, struct pipe_buffer *,
558 struct splice_desc *);
559
/*
 * Pipe input worker. Most of this logic works like a regular pipe, the
 * key here is the 'actor' worker passed in that actually moves the data
 * to the wanted destination. See pipe_to_file/pipe_to_sendpage above.
 *
 * Returns the number of bytes consumed from the pipe, or a negative
 * errno if nothing was consumed.
 */
static ssize_t move_from_pipe(struct pipe_inode_info *pipe, struct file *out,
			      size_t len, unsigned int flags,
			      splice_actor *actor)
{
	int ret, do_wakeup, err;
	struct splice_desc sd;

	ret = 0;
	do_wakeup = 0;

	sd.total_len = len;
	sd.flags = flags;
	sd.file = out;
	sd.pos = out->f_pos;

	/* anonymous (internal) pipes have no inode, hence no mutex */
	if (pipe->inode)
		mutex_lock(&pipe->inode->i_mutex);

	for (;;) {
		int bufs = pipe->nrbufs;

		if (bufs) {
			int curbuf = pipe->curbuf;
			struct pipe_buffer *buf = pipe->bufs + curbuf;
			struct pipe_buf_operations *ops = buf->ops;

			/* don't push more than the caller asked for */
			sd.len = buf->len;
			if (sd.len > sd.total_len)
				sd.len = sd.total_len;

			err = actor(pipe, buf, &sd);
			if (err) {
				/* -ENODATA means a benign 0-byte splice */
				if (!ret && err != -ENODATA)
					ret = err;

				break;
			}

			ret += sd.len;
			buf->offset += sd.len;
			buf->len -= sd.len;
			/* buffer fully consumed: release it, advance curbuf */
			if (!buf->len) {
				buf->ops = NULL;
				ops->release(pipe, buf);
				curbuf = (curbuf + 1) & (PIPE_BUFFERS - 1);
				pipe->curbuf = curbuf;
				pipe->nrbufs = --bufs;
				do_wakeup = 1;
			}

			sd.pos += sd.len;
			sd.total_len -= sd.len;
			if (!sd.total_len)
				break;
		}

		if (bufs)
			continue;
		/* empty pipe and no writers: we're done */
		if (!pipe->writers)
			break;
		/* no writer about to refill: return what we have */
		if (!pipe->waiting_writers) {
			if (ret)
				break;
		}

		if (flags & SPLICE_F_NONBLOCK) {
			if (!ret)
				ret = -EAGAIN;
			break;
		}

		if (signal_pending(current)) {
			if (!ret)
				ret = -ERESTARTSYS;
			break;
		}

		/* wake writers before sleeping so they can refill the pipe */
		if (do_wakeup) {
			smp_mb();
			if (waitqueue_active(&pipe->wait))
				wake_up_interruptible_sync(&pipe->wait);
			kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
			do_wakeup = 0;
		}

		pipe_wait(pipe);
	}

	if (pipe->inode)
		mutex_unlock(&pipe->inode->i_mutex);

	/* final writer wakeup, outside the pipe mutex */
	if (do_wakeup) {
		smp_mb();
		if (waitqueue_active(&pipe->wait))
			wake_up_interruptible(&pipe->wait);
		kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
	}

	/* publish the new output position under the inode mutex */
	mutex_lock(&out->f_mapping->host->i_mutex);
	out->f_pos = sd.pos;
	mutex_unlock(&out->f_mapping->host->i_mutex);
	return ret;

}
669
Jens Axboe83f91352006-04-02 23:05:09 +0200670/**
671 * generic_file_splice_write - splice data from a pipe to a file
Ingo Molnar3a326a22006-04-10 15:18:35 +0200672 * @pipe: pipe info
Jens Axboe83f91352006-04-02 23:05:09 +0200673 * @out: file to write to
674 * @len: number of bytes to splice
675 * @flags: splice modifier flags
676 *
677 * Will either move or copy pages (determined by @flags options) from
678 * the given pipe inode to the given file.
679 *
680 */
Ingo Molnar3a326a22006-04-10 15:18:35 +0200681ssize_t
682generic_file_splice_write(struct pipe_inode_info *pipe, struct file *out,
683 size_t len, unsigned int flags)
Jens Axboe5274f052006-03-30 15:15:30 +0200684{
Jens Axboe4f6f0bd2006-04-02 23:04:46 +0200685 struct address_space *mapping = out->f_mapping;
Ingo Molnar3a326a22006-04-10 15:18:35 +0200686 ssize_t ret;
687
688 ret = move_from_pipe(pipe, out, len, flags, pipe_to_file);
Jens Axboe4f6f0bd2006-04-02 23:04:46 +0200689
690 /*
691 * if file or inode is SYNC and we actually wrote some data, sync it
692 */
693 if (unlikely((out->f_flags & O_SYNC) || IS_SYNC(mapping->host))
694 && ret > 0) {
695 struct inode *inode = mapping->host;
696 int err;
697
698 mutex_lock(&inode->i_mutex);
699 err = generic_osync_inode(mapping->host, mapping,
700 OSYNC_METADATA|OSYNC_DATA);
701 mutex_unlock(&inode->i_mutex);
702
703 if (err)
704 ret = err;
705 }
706
707 return ret;
Jens Axboe5274f052006-03-30 15:15:30 +0200708}
709
Jens Axboe059a8f32006-04-02 23:06:05 +0200710EXPORT_SYMBOL(generic_file_splice_write);
711
/**
 * generic_splice_sendpage - splice data from a pipe to a socket
 * @pipe: pipe to splice from
 * @out: socket to write to
 * @len: number of bytes to splice
 * @flags: splice modifier flags
 *
 * Will send @len bytes from the pipe to a network socket. No data copying
 * is involved.
 *
 */
ssize_t generic_splice_sendpage(struct pipe_inode_info *pipe, struct file *out,
				size_t len, unsigned int flags)
{
	return move_from_pipe(pipe, out, len, flags, pipe_to_sendpage);
}
728
Jens Axboe059a8f32006-04-02 23:06:05 +0200729EXPORT_SYMBOL(generic_splice_sendpage);
Jeff Garzika0f06782006-03-30 23:06:13 -0500730
/*
 * Attempt to initiate a splice from pipe to file. Validates that the
 * destination supports ->splice_write, is open for writing, and passes
 * rw_verify_area() for the requested range, then delegates to the
 * file's splice_write op.
 */
static long do_splice_from(struct pipe_inode_info *pipe, struct file *out,
			   size_t len, unsigned int flags)
{
	loff_t pos;
	int ret;

	if (!out->f_op || !out->f_op->splice_write)
		return -EINVAL;

	if (!(out->f_mode & FMODE_WRITE))
		return -EBADF;

	pos = out->f_pos;

	ret = rw_verify_area(WRITE, out, &pos, len);
	if (unlikely(ret < 0))
		return ret;

	return out->f_op->splice_write(pipe, out, len, flags);
}
754
/*
 * Attempt to initiate a splice from a file to a pipe. Validates the
 * source, clamps the request to the bytes remaining before EOF (an
 * f_pos at or past i_size yields 0), then delegates to ->splice_read.
 */
static long do_splice_to(struct file *in, struct pipe_inode_info *pipe,
			 size_t len, unsigned int flags)
{
	loff_t pos, isize, left;
	int ret;

	if (!in->f_op || !in->f_op->splice_read)
		return -EINVAL;

	if (!(in->f_mode & FMODE_READ))
		return -EBADF;

	pos = in->f_pos;

	ret = rw_verify_area(READ, in, &pos, len);
	if (unlikely(ret < 0))
		return ret;

	/* never ask for more than what's left before EOF */
	isize = i_size_read(in->f_mapping->host);
	if (unlikely(in->f_pos >= isize))
		return 0;

	left = isize - in->f_pos;
	if (left < len)
		len = left;

	return in->f_op->splice_read(in, pipe, len, flags);
}
786
/*
 * Splice directly from file @in to file @out using a per-task internal
 * pipe as the intermediate buffer (neither endpoint is a user pipe).
 * Returns bytes transferred, or a negative errno if nothing moved.
 */
long do_splice_direct(struct file *in, struct file *out, size_t len,
		      unsigned int flags)
{
	struct pipe_inode_info *pipe;
	long ret, bytes;
	umode_t i_mode;
	int i;

	/*
	 * We require the input being a regular file, as we don't want to
	 * randomly drop data for eg socket -> socket splicing. Use the
	 * piped splicing for that!
	 */
	i_mode = in->f_dentry->d_inode->i_mode;
	if (unlikely(!S_ISREG(i_mode) && !S_ISBLK(i_mode)))
		return -EINVAL;

	/*
	 * neither in nor out is a pipe, setup an internal pipe attached to
	 * 'out' and transfer the wanted data from 'in' to 'out' through that
	 */
	pipe = current->splice_pipe;
	if (!pipe) {
		/* lazily allocate and cache the pipe on the task struct */
		pipe = alloc_pipe_info(NULL);
		if (!pipe)
			return -ENOMEM;

		/*
		 * We don't have an immediate reader, but we'll read the stuff
		 * out of the pipe right after the move_to_pipe(). So set
		 * PIPE_READERS appropriately.
		 */
		pipe->readers = 1;

		current->splice_pipe = pipe;
	}

	/*
	 * do the splice
	 */
	ret = 0;
	bytes = 0;

	while (len) {
		size_t read_len, max_read_len;

		/*
		 * Do at most PIPE_BUFFERS pages worth of transfer:
		 */
		max_read_len = min(len, (size_t)(PIPE_BUFFERS*PAGE_SIZE));

		ret = do_splice_to(in, pipe, max_read_len, flags);
		if (unlikely(ret < 0))
			goto out_release;

		read_len = ret;

		/*
		 * NOTE: nonblocking mode only applies to the input. We
		 * must not do the output in nonblocking mode as then we
		 * could get stuck data in the internal pipe:
		 */
		ret = do_splice_from(pipe, out, read_len,
				     flags & ~SPLICE_F_NONBLOCK);
		if (unlikely(ret < 0))
			goto out_release;

		bytes += ret;
		len -= ret;

		/*
		 * In nonblocking mode, if we got back a short read then
		 * that was due to either an IO error or due to the
		 * pagecache entry not being there. In the IO error case
		 * the _next_ splice attempt will produce a clean IO error
		 * return value (not a short read), so in both cases it's
		 * correct to break out of the loop here:
		 */
		if ((flags & SPLICE_F_NONBLOCK) && (read_len < max_read_len))
			break;
	}

	/* the pipe is drained; reset it for the next caller */
	pipe->nrbufs = pipe->curbuf = 0;

	return bytes;

out_release:
	/*
	 * If we did an incomplete transfer we must release
	 * the pipe buffers in question:
	 */
	for (i = 0; i < PIPE_BUFFERS; i++) {
		struct pipe_buffer *buf = pipe->bufs + i;

		if (buf->ops) {
			buf->ops->release(pipe, buf);
			buf->ops = NULL;
		}
	}
	pipe->nrbufs = pipe->curbuf = 0;

	/*
	 * If we transferred some data, return the number of bytes:
	 */
	if (bytes > 0)
		return bytes;

	return ret;
}
896
897EXPORT_SYMBOL(do_splice_direct);
898
/*
 * Determine where to splice to/from: exactly one of @in/@out must be a
 * pipe. An explicit offset is rejected (-ESPIPE) for the pipe side and
 * for non-seekable files on the file side.
 *
 * NOTE(review): the user-supplied offset is copied into f_pos but the
 * updated position is never copied back to userspace — confirm this is
 * the intended ABI for this version.
 */
static long do_splice(struct file *in, loff_t __user *off_in,
		      struct file *out, loff_t __user *off_out,
		      size_t len, unsigned int flags)
{
	struct pipe_inode_info *pipe;

	pipe = in->f_dentry->d_inode->i_pipe;
	if (pipe) {
		if (off_in)
			return -ESPIPE;
		if (off_out) {
			if (out->f_op->llseek == no_llseek)
				return -EINVAL;
			if (copy_from_user(&out->f_pos, off_out,
					   sizeof(loff_t)))
				return -EFAULT;
		}

		return do_splice_from(pipe, out, len, flags);
	}

	pipe = out->f_dentry->d_inode->i_pipe;
	if (pipe) {
		if (off_out)
			return -ESPIPE;
		if (off_in) {
			if (in->f_op->llseek == no_llseek)
				return -EINVAL;
			if (copy_from_user(&in->f_pos, off_in, sizeof(loff_t)))
				return -EFAULT;
		}

		return do_splice_to(in, pipe, len, flags);
	}

	return -EINVAL;
}
939
Ingo Molnar529565d2006-04-10 15:18:58 +0200940asmlinkage long sys_splice(int fd_in, loff_t __user *off_in,
941 int fd_out, loff_t __user *off_out,
942 size_t len, unsigned int flags)
Jens Axboe5274f052006-03-30 15:15:30 +0200943{
944 long error;
945 struct file *in, *out;
946 int fput_in, fput_out;
947
948 if (unlikely(!len))
949 return 0;
950
951 error = -EBADF;
Ingo Molnar529565d2006-04-10 15:18:58 +0200952 in = fget_light(fd_in, &fput_in);
Jens Axboe5274f052006-03-30 15:15:30 +0200953 if (in) {
954 if (in->f_mode & FMODE_READ) {
Ingo Molnar529565d2006-04-10 15:18:58 +0200955 out = fget_light(fd_out, &fput_out);
Jens Axboe5274f052006-03-30 15:15:30 +0200956 if (out) {
957 if (out->f_mode & FMODE_WRITE)
Ingo Molnar529565d2006-04-10 15:18:58 +0200958 error = do_splice(in, off_in,
959 out, off_out,
960 len, flags);
Jens Axboe5274f052006-03-30 15:15:30 +0200961 fput_light(out, fput_out);
962 }
963 }
964
965 fput_light(in, fput_in);
966 }
967
968 return error;
969}