/*
 * Copyright (C) 2008 Oracle. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */

#include <linux/kernel.h>
#include <linux/bio.h>
#include <linux/buffer_head.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/time.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/backing-dev.h>
#include <linux/mpage.h>
#include <linux/swap.h>
#include <linux/writeback.h>
#include <linux/bit_spinlock.h>
#include <linux/slab.h>
#include "compat.h"
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "btrfs_inode.h"
#include "volumes.h"
#include "ordered-data.h"
#include "compression.h"
#include "extent_io.h"
#include "extent_map.h"

struct compressed_bio {
	/* number of bios pending for this compressed extent */
	atomic_t pending_bios;

	/* the pages with the compressed data on them */
	struct page **compressed_pages;

	/* inode that owns this data */
	struct inode *inode;

	/* starting offset in the inode for our pages */
	u64 start;

	/* number of bytes in the inode we're working on */
	unsigned long len;

	/* number of bytes on disk */
	unsigned long compressed_len;

	/* the compression algorithm for this bio */
	int compress_type;

	/* number of compressed pages in the array */
	unsigned long nr_pages;

	/* IO errors */
	int errors;
	int mirror_num;

	/* for reads, this is the bio we are copying the data into */
	struct bio *orig_bio;

	/*
	 * the start of a variable length array of checksums only
	 * used by reads
	 */
	u32 sums;
};

static int btrfs_decompress_biovec(int type, struct page **pages_in,
				   u64 disk_start, struct bio_vec *bvec,
				   int vcnt, size_t srclen);

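/*
 * how much memory a compressed_bio for disk_size bytes of compressed data
 * needs: the struct itself plus room for one checksum per disk sector,
 * stored in the variable length sums array at the end
 */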
static inline int compressed_bio_size(struct btrfs_root *root,
				      unsigned long disk_size)
{
	u16 csum_size = btrfs_super_csum_size(root->fs_info->super_copy);

	return sizeof(struct compressed_bio) +
		((disk_size + root->sectorsize - 1) / root->sectorsize) *
		csum_size;
}

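/*
 * allocate a bio aimed at the compressed extent starting at first_byte,
 * sized to as many vecs as the device will accept in a single bio
 */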
static struct bio *compressed_bio_alloc(struct block_device *bdev,
					u64 first_byte, gfp_t gfp_flags)
{
	int nr_vecs;

	nr_vecs = bio_get_nr_vecs(bdev);
	return btrfs_bio_alloc(bdev, first_byte >> 9, nr_vecs, gfp_flags);
}

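/*
 * verify each compressed page against the checksums that were stashed in
 * the compressed_bio when the read was submitted.  Inodes flagged with
 * NODATASUM are skipped.
 */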
static int check_compressed_csum(struct inode *inode,
				 struct compressed_bio *cb,
				 u64 disk_start)
{
	int ret;
	struct page *page;
	unsigned long i;
	char *kaddr;
	u32 csum;
	u32 *cb_sum = &cb->sums;

	if (BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)
		return 0;

	for (i = 0; i < cb->nr_pages; i++) {
		page = cb->compressed_pages[i];
		csum = ~(u32)0;

		kaddr = kmap_atomic(page);
		csum = btrfs_csum_data(kaddr, csum, PAGE_CACHE_SIZE);
		btrfs_csum_final(csum, (char *)&csum);
		kunmap_atomic(kaddr);

		if (csum != *cb_sum) {
			printk(KERN_INFO "btrfs csum failed ino %llu "
			       "extent %llu csum %u "
			       "wanted %u mirror %d\n",
			       (unsigned long long)btrfs_ino(inode),
			       (unsigned long long)disk_start,
			       csum, *cb_sum, cb->mirror_num);
			ret = -EIO;
			goto fail;
		}
		cb_sum++;

	}
	ret = 0;
fail:
	return ret;
}

/* when we finish reading compressed pages from the disk, we
 * decompress them and then run the bio end_io routines on the
 * decompressed pages (in the inode address space).
 *
 * This allows the checksumming and other IO error handling routines
 * to work normally
 *
 * The compressed pages are freed here, and it must be run
 * in process context
 */
static void end_compressed_bio_read(struct bio *bio, int err)
{
	struct compressed_bio *cb = bio->bi_private;
	struct inode *inode;
	struct page *page;
	unsigned long index;
	int ret;

	if (err)
		cb->errors = 1;

	/* if there are more bios still pending for this compressed
	 * extent, just exit
	 */
	if (!atomic_dec_and_test(&cb->pending_bios))
		goto out;

	inode = cb->inode;
	ret = check_compressed_csum(inode, cb, (u64)bio->bi_sector << 9);
	if (ret)
		goto csum_failed;

	/* ok, we're the last bio for this extent, let's start
	 * the decompression.
	 */
	ret = btrfs_decompress_biovec(cb->compress_type,
				      cb->compressed_pages,
				      cb->start,
				      cb->orig_bio->bi_io_vec,
				      cb->orig_bio->bi_vcnt,
				      cb->compressed_len);
csum_failed:
	if (ret)
		cb->errors = 1;

	/* release the compressed pages */
	index = 0;
	for (index = 0; index < cb->nr_pages; index++) {
		page = cb->compressed_pages[index];
		page->mapping = NULL;
		page_cache_release(page);
	}

	/* do io completion on the original bio */
	if (cb->errors) {
		bio_io_error(cb->orig_bio);
	} else {
		int bio_index = 0;
		struct bio_vec *bvec = cb->orig_bio->bi_io_vec;

		/*
		 * we have verified the checksum already, set page
		 * checked so the end_io handlers know about it
		 */
		while (bio_index < cb->orig_bio->bi_vcnt) {
			SetPageChecked(bvec->bv_page);
			bvec++;
			bio_index++;
		}
		bio_endio(cb->orig_bio, 0);
	}

	/* finally free the cb struct */
	kfree(cb->compressed_pages);
	kfree(cb);
out:
	bio_put(bio);
}

/*
 * Clear the writeback bits on all of the file
 * pages for a compressed write
 */
static noinline void end_compressed_writeback(struct inode *inode, u64 start,
					      unsigned long ram_size)
{
	unsigned long index = start >> PAGE_CACHE_SHIFT;
	unsigned long end_index = (start + ram_size - 1) >> PAGE_CACHE_SHIFT;
	struct page *pages[16];
	unsigned long nr_pages = end_index - index + 1;
	int i;
	int ret;

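	/*
	 * walk the range in batches of up to 16 pages, ending writeback
	 * and dropping the page cache reference on every page we find
	 */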
	while (nr_pages > 0) {
		ret = find_get_pages_contig(inode->i_mapping, index,
				     min_t(unsigned long,
				     nr_pages, ARRAY_SIZE(pages)), pages);
		if (ret == 0) {
			nr_pages -= 1;
			index += 1;
			continue;
		}
		for (i = 0; i < ret; i++) {
			end_page_writeback(pages[i]);
			page_cache_release(pages[i]);
		}
		nr_pages -= ret;
		index += ret;
	}
	/* the inode may be gone now */
}

/*
 * do the cleanup once all the compressed pages hit the disk.
 * This will clear writeback on the file pages and free the compressed
 * pages.
 *
 * This also calls the writeback end hooks for the file pages so that
 * metadata and checksums can be updated in the file.
 */
static void end_compressed_bio_write(struct bio *bio, int err)
{
	struct extent_io_tree *tree;
	struct compressed_bio *cb = bio->bi_private;
	struct inode *inode;
	struct page *page;
	unsigned long index;

	if (err)
		cb->errors = 1;

	/* if there are more bios still pending for this compressed
	 * extent, just exit
	 */
	if (!atomic_dec_and_test(&cb->pending_bios))
		goto out;

	/* ok, we're the last bio for this extent, step one is to
	 * call back into the FS and do all the end_io operations
	 */
	inode = cb->inode;
	tree = &BTRFS_I(inode)->io_tree;
	cb->compressed_pages[0]->mapping = cb->inode->i_mapping;
	tree->ops->writepage_end_io_hook(cb->compressed_pages[0],
					 cb->start,
					 cb->start + cb->len - 1,
					 NULL, 1);
	cb->compressed_pages[0]->mapping = NULL;

	end_compressed_writeback(inode, cb->start, cb->len);
	/* note, our inode could be gone now */

	/*
	 * release the compressed pages, these came from alloc_page and
	 * are not attached to the inode at all
	 */
	index = 0;
	for (index = 0; index < cb->nr_pages; index++) {
		page = cb->compressed_pages[index];
		page->mapping = NULL;
		page_cache_release(page);
	}

	/* finally free the cb struct */
	kfree(cb->compressed_pages);
	kfree(cb);
out:
	bio_put(bio);
}

/*
 * worker function to build and submit bios for previously compressed pages.
 * The corresponding pages in the inode should be marked for writeback
 * and the compressed pages should have a reference on them for dropping
 * when the IO is complete.
 *
 * This also checksums the file bytes and gets things ready for
 * the end io hooks.
 */
int btrfs_submit_compressed_write(struct inode *inode, u64 start,
				 unsigned long len, u64 disk_start,
				 unsigned long compressed_len,
				 struct page **compressed_pages,
				 unsigned long nr_pages)
{
	struct bio *bio = NULL;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct compressed_bio *cb;
	unsigned long bytes_left;
	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
	int pg_index = 0;
	struct page *page;
	u64 first_byte = disk_start;
	struct block_device *bdev;
	int ret;
	int skip_sum = BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM;

	WARN_ON(start & ((u64)PAGE_CACHE_SIZE - 1));
	cb = kmalloc(compressed_bio_size(root, compressed_len), GFP_NOFS);
	if (!cb)
		return -ENOMEM;
	atomic_set(&cb->pending_bios, 0);
	cb->errors = 0;
	cb->inode = inode;
	cb->start = start;
	cb->len = len;
	cb->mirror_num = 0;
	cb->compressed_pages = compressed_pages;
	cb->compressed_len = compressed_len;
	cb->orig_bio = NULL;
	cb->nr_pages = nr_pages;

	bdev = BTRFS_I(inode)->root->fs_info->fs_devices->latest_bdev;

	bio = compressed_bio_alloc(bdev, first_byte, GFP_NOFS);
	if (!bio) {
		kfree(cb);
		return -ENOMEM;
	}
	bio->bi_private = cb;
	bio->bi_end_io = end_compressed_bio_write;
	atomic_inc(&cb->pending_bios);

	/* create and submit bios for the compressed pages */
	bytes_left = compressed_len;
	for (pg_index = 0; pg_index < cb->nr_pages; pg_index++) {
		page = compressed_pages[pg_index];
		page->mapping = inode->i_mapping;
		if (bio->bi_size)
			ret = io_tree->ops->merge_bio_hook(WRITE, page, 0,
							   PAGE_CACHE_SIZE,
							   bio, 0);
		else
			ret = 0;

		page->mapping = NULL;
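		/*
		 * if the merge hook said no or the page doesn't fit in the
		 * current bio, submit what we have and start a fresh bio
		 * for the remaining compressed pages
		 */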
		if (ret || bio_add_page(bio, page, PAGE_CACHE_SIZE, 0) <
		    PAGE_CACHE_SIZE) {
			bio_get(bio);

			/*
			 * inc the count before we submit the bio so
			 * we know the end IO handler won't happen before
			 * we inc the count. Otherwise, the cb might get
			 * freed before we're done setting it up
			 */
			atomic_inc(&cb->pending_bios);
			ret = btrfs_bio_wq_end_io(root->fs_info, bio, 0);
			BUG_ON(ret); /* -ENOMEM */

			if (!skip_sum) {
				ret = btrfs_csum_one_bio(root, inode, bio,
							 start, 1);
				BUG_ON(ret); /* -ENOMEM */
			}

			ret = btrfs_map_bio(root, WRITE, bio, 0, 1);
			BUG_ON(ret); /* -ENOMEM */

			bio_put(bio);

			bio = compressed_bio_alloc(bdev, first_byte, GFP_NOFS);
			BUG_ON(!bio);
			bio->bi_private = cb;
			bio->bi_end_io = end_compressed_bio_write;
			bio_add_page(bio, page, PAGE_CACHE_SIZE, 0);
		}
		if (bytes_left < PAGE_CACHE_SIZE) {
			printk("bytes left %lu compress len %lu nr %lu\n",
			       bytes_left, cb->compressed_len, cb->nr_pages);
		}
		bytes_left -= PAGE_CACHE_SIZE;
		first_byte += PAGE_CACHE_SIZE;
		cond_resched();
	}
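	/* submit the last, possibly partially filled, bio */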
	bio_get(bio);

	ret = btrfs_bio_wq_end_io(root->fs_info, bio, 0);
	BUG_ON(ret); /* -ENOMEM */

	if (!skip_sum) {
		ret = btrfs_csum_one_bio(root, inode, bio, start, 1);
		BUG_ON(ret); /* -ENOMEM */
	}

	ret = btrfs_map_bio(root, WRITE, bio, 0, 1);
	BUG_ON(ret); /* -ENOMEM */

	bio_put(bio);
	return 0;
}

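/*
 * readahead helper for compressed reads: add pages that sit beyond the
 * range the caller asked for, but still inside the same compressed extent,
 * to the original bio so the whole extent is decompressed and cached in
 * one pass.  Gives up if it keeps hitting pages already in the page cache.
 */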
static noinline int add_ra_bio_pages(struct inode *inode,
				     u64 compressed_end,
				     struct compressed_bio *cb)
{
	unsigned long end_index;
	unsigned long pg_index;
	u64 last_offset;
	u64 isize = i_size_read(inode);
	int ret;
	struct page *page;
	unsigned long nr_pages = 0;
	struct extent_map *em;
	struct address_space *mapping = inode->i_mapping;
	struct extent_map_tree *em_tree;
	struct extent_io_tree *tree;
	u64 end;
	int misses = 0;

	page = cb->orig_bio->bi_io_vec[cb->orig_bio->bi_vcnt - 1].bv_page;
	last_offset = (page_offset(page) + PAGE_CACHE_SIZE);
	em_tree = &BTRFS_I(inode)->extent_tree;
	tree = &BTRFS_I(inode)->io_tree;

	if (isize == 0)
		return 0;

	end_index = (i_size_read(inode) - 1) >> PAGE_CACHE_SHIFT;

	while (last_offset < compressed_end) {
		pg_index = last_offset >> PAGE_CACHE_SHIFT;

		if (pg_index > end_index)
			break;

		rcu_read_lock();
		page = radix_tree_lookup(&mapping->page_tree, pg_index);
		rcu_read_unlock();
		if (page) {
			misses++;
			if (misses > 4)
				break;
			goto next;
		}

		page = __page_cache_alloc(mapping_gfp_mask(mapping) &
								~__GFP_FS);
		if (!page)
			break;

		if (add_to_page_cache_lru(page, mapping, pg_index,
								GFP_NOFS)) {
			page_cache_release(page);
			goto next;
		}

		end = last_offset + PAGE_CACHE_SIZE - 1;
		/*
		 * at this point, we have a locked page in the page cache
		 * for these bytes in the file.  But, we have to make
		 * sure they map to this compressed extent on disk.
		 */
		set_page_extent_mapped(page);
		lock_extent(tree, last_offset, end);
		read_lock(&em_tree->lock);
		em = lookup_extent_mapping(em_tree, last_offset,
					   PAGE_CACHE_SIZE);
		read_unlock(&em_tree->lock);

		if (!em || last_offset < em->start ||
		    (last_offset + PAGE_CACHE_SIZE > extent_map_end(em)) ||
		    (em->block_start >> 9) != cb->orig_bio->bi_sector) {
			free_extent_map(em);
			unlock_extent(tree, last_offset, end);
			unlock_page(page);
			page_cache_release(page);
			break;
		}
		free_extent_map(em);

		if (page->index == end_index) {
			char *userpage;
			size_t zero_offset = isize & (PAGE_CACHE_SIZE - 1);

			if (zero_offset) {
				int zeros;
				zeros = PAGE_CACHE_SIZE - zero_offset;
				userpage = kmap_atomic(page);
				memset(userpage + zero_offset, 0, zeros);
				flush_dcache_page(page);
				kunmap_atomic(userpage);
			}
		}

		ret = bio_add_page(cb->orig_bio, page,
				   PAGE_CACHE_SIZE, 0);

		if (ret == PAGE_CACHE_SIZE) {
			nr_pages++;
			page_cache_release(page);
		} else {
			unlock_extent(tree, last_offset, end);
			unlock_page(page);
			page_cache_release(page);
			break;
		}
next:
		last_offset += PAGE_CACHE_SIZE;
	}
	return 0;
}

/*
 * for a compressed read, the bio we get passed has all the inode pages
 * in it.  We don't actually do IO on those pages but allocate new ones
 * to hold the compressed pages on disk.
 *
 * bio->bi_sector points to the compressed extent on disk
 * bio->bi_io_vec points to all of the inode pages
 * bio->bi_vcnt is a count of pages
 *
 * After the compressed pages are read, we copy the bytes into the
 * bio we were passed and then call the bio end_io calls
 */
int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
				 int mirror_num, unsigned long bio_flags)
{
	struct extent_io_tree *tree;
	struct extent_map_tree *em_tree;
	struct compressed_bio *cb;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	unsigned long uncompressed_len = bio->bi_vcnt * PAGE_CACHE_SIZE;
	unsigned long compressed_len;
	unsigned long nr_pages;
	unsigned long pg_index;
	struct page *page;
	struct block_device *bdev;
	struct bio *comp_bio;
	u64 cur_disk_byte = (u64)bio->bi_sector << 9;
	u64 em_len;
	u64 em_start;
	struct extent_map *em;
	int ret = -ENOMEM;
	int faili = 0;
	u32 *sums;

	tree = &BTRFS_I(inode)->io_tree;
	em_tree = &BTRFS_I(inode)->extent_tree;

	/* we need the actual starting offset of this extent in the file */
	read_lock(&em_tree->lock);
	em = lookup_extent_mapping(em_tree,
				   page_offset(bio->bi_io_vec->bv_page),
				   PAGE_CACHE_SIZE);
	read_unlock(&em_tree->lock);
	if (!em)
		return -EIO;

	compressed_len = em->block_len;
	cb = kmalloc(compressed_bio_size(root, compressed_len), GFP_NOFS);
	if (!cb)
		goto out;

	atomic_set(&cb->pending_bios, 0);
	cb->errors = 0;
	cb->inode = inode;
	cb->mirror_num = mirror_num;
	sums = &cb->sums;

	cb->start = em->orig_start;
	em_len = em->len;
	em_start = em->start;

	free_extent_map(em);
	em = NULL;

	cb->len = uncompressed_len;
	cb->compressed_len = compressed_len;
	cb->compress_type = extent_compress_type(bio_flags);
	cb->orig_bio = bio;

	nr_pages = (compressed_len + PAGE_CACHE_SIZE - 1) /
				 PAGE_CACHE_SIZE;
	cb->compressed_pages = kzalloc(sizeof(struct page *) * nr_pages,
				       GFP_NOFS);
	if (!cb->compressed_pages)
		goto fail1;

	bdev = BTRFS_I(inode)->root->fs_info->fs_devices->latest_bdev;

	for (pg_index = 0; pg_index < nr_pages; pg_index++) {
		cb->compressed_pages[pg_index] = alloc_page(GFP_NOFS |
							      __GFP_HIGHMEM);
		if (!cb->compressed_pages[pg_index]) {
			faili = pg_index - 1;
			ret = -ENOMEM;
			goto fail2;
		}
	}
	faili = nr_pages - 1;
	cb->nr_pages = nr_pages;

	/* In the parent-locked case, we only locked the range we are
	 * interested in.  In all other cases, we can opportunistically
	 * cache decompressed data that goes beyond the requested range. */
	if (!(bio_flags & EXTENT_BIO_PARENT_LOCKED))
		add_ra_bio_pages(inode, em_start + em_len, cb);

	/* include any pages we added in add_ra_bio_pages */
	uncompressed_len = bio->bi_vcnt * PAGE_CACHE_SIZE;
	cb->len = uncompressed_len;

	comp_bio = compressed_bio_alloc(bdev, cur_disk_byte, GFP_NOFS);
	if (!comp_bio)
		goto fail2;
	comp_bio->bi_private = cb;
	comp_bio->bi_end_io = end_compressed_bio_read;
	atomic_inc(&cb->pending_bios);

	for (pg_index = 0; pg_index < nr_pages; pg_index++) {
		page = cb->compressed_pages[pg_index];
		page->mapping = inode->i_mapping;
		page->index = em_start >> PAGE_CACHE_SHIFT;

		if (comp_bio->bi_size)
			ret = tree->ops->merge_bio_hook(READ, page, 0,
							PAGE_CACHE_SIZE,
							comp_bio, 0);
		else
			ret = 0;

		page->mapping = NULL;
		if (ret || bio_add_page(comp_bio, page, PAGE_CACHE_SIZE, 0) <
		    PAGE_CACHE_SIZE) {
			bio_get(comp_bio);

			ret = btrfs_bio_wq_end_io(root->fs_info, comp_bio, 0);
			BUG_ON(ret); /* -ENOMEM */

			/*
			 * inc the count before we submit the bio so
			 * we know the end IO handler won't happen before
			 * we inc the count. Otherwise, the cb might get
			 * freed before we're done setting it up
			 */
			atomic_inc(&cb->pending_bios);

			if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)) {
				ret = btrfs_lookup_bio_sums(root, inode,
							    comp_bio, sums);
				BUG_ON(ret); /* -ENOMEM */
			}
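			/*
			 * advance the checksum pointer past the sums that
			 * cover the sectors in the bio we just submitted
			 */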
			sums += (comp_bio->bi_size + root->sectorsize - 1) /
				root->sectorsize;

			ret = btrfs_map_bio(root, READ, comp_bio,
					    mirror_num, 0);
			if (ret)
				bio_endio(comp_bio, ret);

			bio_put(comp_bio);

			comp_bio = compressed_bio_alloc(bdev, cur_disk_byte,
							GFP_NOFS);
			BUG_ON(!comp_bio);
			comp_bio->bi_private = cb;
			comp_bio->bi_end_io = end_compressed_bio_read;

			bio_add_page(comp_bio, page, PAGE_CACHE_SIZE, 0);
		}
		cur_disk_byte += PAGE_CACHE_SIZE;
	}
	bio_get(comp_bio);

	ret = btrfs_bio_wq_end_io(root->fs_info, comp_bio, 0);
	BUG_ON(ret); /* -ENOMEM */

	if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)) {
		ret = btrfs_lookup_bio_sums(root, inode, comp_bio, sums);
		BUG_ON(ret); /* -ENOMEM */
	}

	ret = btrfs_map_bio(root, READ, comp_bio, mirror_num, 0);
	if (ret)
		bio_endio(comp_bio, ret);

	bio_put(comp_bio);
	return 0;

fail2:
	while (faili >= 0) {
		__free_page(cb->compressed_pages[faili]);
		faili--;
	}

	kfree(cb->compressed_pages);
fail1:
	kfree(cb);
out:
	free_extent_map(em);
	return ret;
}

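/*
 * per compression type lists of idle workspaces, protected by a per type
 * spinlock.  The number of allocated workspaces is capped at
 * num_online_cpus(); callers that can't get one wait on the per type
 * wait queue.
 */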
static struct list_head comp_idle_workspace[BTRFS_COMPRESS_TYPES];
static spinlock_t comp_workspace_lock[BTRFS_COMPRESS_TYPES];
static int comp_num_workspace[BTRFS_COMPRESS_TYPES];
static atomic_t comp_alloc_workspace[BTRFS_COMPRESS_TYPES];
static wait_queue_head_t comp_workspace_wait[BTRFS_COMPRESS_TYPES];

static struct btrfs_compress_op *btrfs_compress_op[] = {
	&btrfs_zlib_compress,
	&btrfs_lzo_compress,
};

void __init btrfs_init_compress(void)
{
	int i;

	for (i = 0; i < BTRFS_COMPRESS_TYPES; i++) {
		INIT_LIST_HEAD(&comp_idle_workspace[i]);
		spin_lock_init(&comp_workspace_lock[i]);
		atomic_set(&comp_alloc_workspace[i], 0);
		init_waitqueue_head(&comp_workspace_wait[i]);
	}
}

/*
 * this finds an available workspace or allocates a new one
 * ERR_PTR is returned if things go bad.
 */
static struct list_head *find_workspace(int type)
{
	struct list_head *workspace;
	int cpus = num_online_cpus();
	int idx = type - 1;

	struct list_head *idle_workspace = &comp_idle_workspace[idx];
	spinlock_t *workspace_lock = &comp_workspace_lock[idx];
	atomic_t *alloc_workspace = &comp_alloc_workspace[idx];
	wait_queue_head_t *workspace_wait = &comp_workspace_wait[idx];
	int *num_workspace = &comp_num_workspace[idx];
again:
	spin_lock(workspace_lock);
	if (!list_empty(idle_workspace)) {
		workspace = idle_workspace->next;
		list_del(workspace);
		(*num_workspace)--;
		spin_unlock(workspace_lock);
		return workspace;

	}
	if (atomic_read(alloc_workspace) > cpus) {
		DEFINE_WAIT(wait);

		spin_unlock(workspace_lock);
		prepare_to_wait(workspace_wait, &wait, TASK_UNINTERRUPTIBLE);
		if (atomic_read(alloc_workspace) > cpus && !*num_workspace)
			schedule();
		finish_wait(workspace_wait, &wait);
		goto again;
	}
	atomic_inc(alloc_workspace);
	spin_unlock(workspace_lock);

	workspace = btrfs_compress_op[idx]->alloc_workspace();
	if (IS_ERR(workspace)) {
		atomic_dec(alloc_workspace);
		wake_up(workspace_wait);
	}
	return workspace;
}

/*
 * put a workspace struct back on the list or free it if we have enough
 * idle ones sitting around
 */
static void free_workspace(int type, struct list_head *workspace)
{
	int idx = type - 1;
	struct list_head *idle_workspace = &comp_idle_workspace[idx];
	spinlock_t *workspace_lock = &comp_workspace_lock[idx];
	atomic_t *alloc_workspace = &comp_alloc_workspace[idx];
	wait_queue_head_t *workspace_wait = &comp_workspace_wait[idx];
	int *num_workspace = &comp_num_workspace[idx];

	spin_lock(workspace_lock);
	if (*num_workspace < num_online_cpus()) {
		list_add_tail(workspace, idle_workspace);
		(*num_workspace)++;
		spin_unlock(workspace_lock);
		goto wake;
	}
	spin_unlock(workspace_lock);

	btrfs_compress_op[idx]->free_workspace(workspace);
	atomic_dec(alloc_workspace);
wake:
	smp_mb();
	if (waitqueue_active(workspace_wait))
		wake_up(workspace_wait);
}

/*
 * cleanup function for module exit
 */
static void free_workspaces(void)
{
	struct list_head *workspace;
	int i;

	for (i = 0; i < BTRFS_COMPRESS_TYPES; i++) {
		while (!list_empty(&comp_idle_workspace[i])) {
			workspace = comp_idle_workspace[i].next;
			list_del(workspace);
			btrfs_compress_op[i]->free_workspace(workspace);
			atomic_dec(&comp_alloc_workspace[i]);
		}
	}
}

/*
 * given an address space and start/len, compress the bytes.
 *
 * pages are allocated to hold the compressed result and stored
 * in 'pages'
 *
 * out_pages is used to return the number of pages allocated.  There
 * may be pages allocated even if we return an error
 *
 * total_in is used to return the number of bytes actually read.  It
 * may be smaller than len if we had to exit early because we
 * ran out of room in the pages array or because we cross the
 * max_out threshold.
 *
 * total_out is used to return the total number of compressed bytes
 *
 * max_out tells us the max number of bytes that we're allowed to
 * stuff into pages
 */
int btrfs_compress_pages(int type, struct address_space *mapping,
			 u64 start, unsigned long len,
			 struct page **pages,
			 unsigned long nr_dest_pages,
			 unsigned long *out_pages,
			 unsigned long *total_in,
			 unsigned long *total_out,
			 unsigned long max_out)
{
	struct list_head *workspace;
	int ret;

	workspace = find_workspace(type);
	if (IS_ERR(workspace))
		return -1;

	ret = btrfs_compress_op[type-1]->compress_pages(workspace, mapping,
						      start, len, pages,
						      nr_dest_pages, out_pages,
						      total_in, total_out,
						      max_out);
	free_workspace(type, workspace);
	return ret;
}

/*
 * pages_in is an array of pages with compressed data.
 *
 * disk_start is the starting logical offset of this array in the file
 *
 * bvec is a bio_vec of pages from the file that we want to decompress into
 *
 * vcnt is the count of pages in the biovec
 *
 * srclen is the number of bytes in pages_in
 *
 * The basic idea is that we have a bio that was created by readpages.
 * The pages in the bio are for the uncompressed data, and they may not
 * be contiguous.  They all correspond to the range of bytes covered by
 * the compressed extent.
 */
static int btrfs_decompress_biovec(int type, struct page **pages_in,
				   u64 disk_start, struct bio_vec *bvec,
				   int vcnt, size_t srclen)
{
	struct list_head *workspace;
	int ret;

	workspace = find_workspace(type);
	if (IS_ERR(workspace))
		return -ENOMEM;

	ret = btrfs_compress_op[type-1]->decompress_biovec(workspace, pages_in,
							 disk_start,
							 bvec, vcnt, srclen);
	free_workspace(type, workspace);
	return ret;
}

/*
 * a less complex decompression routine.  Our compressed data fits in a
 * single page, and we want to read a single page out of it.
 * start_byte tells us the offset into the compressed data we're interested in
 */
int btrfs_decompress(int type, unsigned char *data_in, struct page *dest_page,
		     unsigned long start_byte, size_t srclen, size_t destlen)
{
	struct list_head *workspace;
	int ret;

	workspace = find_workspace(type);
	if (IS_ERR(workspace))
		return -ENOMEM;

	ret = btrfs_compress_op[type-1]->decompress(workspace, data_in,
						  dest_page, start_byte,
						  srclen, destlen);

	free_workspace(type, workspace);
	return ret;
}

void btrfs_exit_compress(void)
{
	free_workspaces();
}

/*
 * Copy uncompressed data from working buffer to pages.
 *
 * buf_start is the byte offset of the start of our working buffer within
 * the uncompressed data.
 *
 * total_out is the last byte of the buffer
 */
int btrfs_decompress_buf2page(char *buf, unsigned long buf_start,
			      unsigned long total_out, u64 disk_start,
			      struct bio_vec *bvec, int vcnt,
			      unsigned long *pg_index,
			      unsigned long *pg_offset)
{
	unsigned long buf_offset;
	unsigned long current_buf_start;
	unsigned long start_byte;
	unsigned long working_bytes = total_out - buf_start;
	unsigned long bytes;
	char *kaddr;
	struct page *page_out = bvec[*pg_index].bv_page;

	/*
	 * start byte is the first byte of the page we're currently
	 * copying into relative to the start of the compressed data.
	 */
	start_byte = page_offset(page_out) - disk_start;

	/* we haven't yet hit data corresponding to this page */
	if (total_out <= start_byte)
		return 1;

	/*
	 * the start of the data we care about is offset into
	 * the middle of our working buffer
	 */
	if (total_out > start_byte && buf_start < start_byte) {
		buf_offset = start_byte - buf_start;
		working_bytes -= buf_offset;
	} else {
		buf_offset = 0;
	}
	current_buf_start = buf_start;

	/* copy bytes from the working buffer into the pages */
	while (working_bytes > 0) {
		bytes = min(PAGE_CACHE_SIZE - *pg_offset,
			    PAGE_CACHE_SIZE - buf_offset);
		bytes = min(bytes, working_bytes);
		kaddr = kmap_atomic(page_out);
		memcpy(kaddr + *pg_offset, buf + buf_offset, bytes);
		kunmap_atomic(kaddr);
		flush_dcache_page(page_out);

		*pg_offset += bytes;
		buf_offset += bytes;
		working_bytes -= bytes;
		current_buf_start += bytes;

		/* check if we need to pick another page */
		if (*pg_offset == PAGE_CACHE_SIZE) {
			(*pg_index)++;
			if (*pg_index >= vcnt)
				return 0;

			page_out = bvec[*pg_index].bv_page;
			*pg_offset = 0;
			start_byte = page_offset(page_out) - disk_start;

			/*
			 * make sure our new page is covered by this
			 * working buffer
			 */
			if (total_out <= start_byte)
				return 1;

			/*
			 * the next page in the biovec might not be adjacent
			 * to the last page, but it might still be found
			 * inside this working buffer. bump our offset pointer
			 */
			if (total_out > start_byte &&
			    current_buf_start < start_byte) {
				buf_offset = start_byte - buf_start;
				working_bytes = total_out - start_byte;
				current_buf_start = buf_start + buf_offset;
			}
		}
	}

	return 1;
}