/*
 * Copyright (C) 2008 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */

#include <linux/kernel.h>
#include <linux/bio.h>
#include <linux/buffer_head.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/time.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/backing-dev.h>
#include <linux/mpage.h>
#include <linux/swap.h>
#include <linux/writeback.h>
#include <linux/bit_spinlock.h>
#include <linux/slab.h>
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "btrfs_inode.h"
#include "volumes.h"
#include "ordered-data.h"
#include "compression.h"
#include "extent_io.h"
#include "extent_map.h"

struct compressed_bio {
	/* number of bios pending for this compressed extent */
	atomic_t pending_bios;

	/* the pages with the compressed data on them */
	struct page **compressed_pages;

	/* inode that owns this data */
	struct inode *inode;

	/* starting offset in the inode for our pages */
	u64 start;

	/* number of bytes in the inode we're working on */
	unsigned long len;

	/* number of bytes on disk */
	unsigned long compressed_len;

	/* the compression algorithm for this bio */
	int compress_type;

	/* number of compressed pages in the array */
	unsigned long nr_pages;

	/* IO errors */
	int errors;
	int mirror_num;

	/* for reads, this is the bio we are copying the data into */
	struct bio *orig_bio;

	/*
	 * the start of a variable length array of checksums only
	 * used by reads
	 */
	u32 sums;
};
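
/*
 * Note on accounting: pending_bios counts the bios still in flight for a
 * compressed extent.  Every bio we submit owns one reference; the end_io
 * handlers drop it, and only the handler that brings the count to zero
 * performs the extent-wide completion work.
 */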

static int btrfs_decompress_bio(int type, struct page **pages_in,
				u64 disk_start, struct bio *orig_bio,
				size_t srclen);

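/*
 * A compressed_bio is allocated with room for a trailing array of
 * checksums behind cb->sums: one u32 for each sectorsize block of
 * compressed data on disk.
 */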
static inline int compressed_bio_size(struct btrfs_fs_info *fs_info,
				      unsigned long disk_size)
{
	u16 csum_size = btrfs_super_csum_size(fs_info->super_copy);

	return sizeof(struct compressed_bio) +
		(DIV_ROUND_UP(disk_size, fs_info->sectorsize)) * csum_size;
}

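/* bi_sector is in 512-byte units, hence the shift by 9 */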
static struct bio *compressed_bio_alloc(struct block_device *bdev,
					u64 first_byte, gfp_t gfp_flags)
{
	return btrfs_bio_alloc(bdev, first_byte >> 9, BIO_MAX_PAGES, gfp_flags);
}

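/*
 * Checksum each page of compressed data against the array stored behind
 * cb->sums.  Inodes flagged NODATASUM carry no checksums and are skipped.
 */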
static int check_compressed_csum(struct inode *inode,
				 struct compressed_bio *cb,
				 u64 disk_start)
{
	int ret;
	struct page *page;
	unsigned long i;
	char *kaddr;
	u32 csum;
	u32 *cb_sum = &cb->sums;

	if (BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)
		return 0;

	for (i = 0; i < cb->nr_pages; i++) {
		page = cb->compressed_pages[i];
		csum = ~(u32)0;

		kaddr = kmap_atomic(page);
		csum = btrfs_csum_data(kaddr, csum, PAGE_SIZE);
		btrfs_csum_final(csum, (u8 *)&csum);
		kunmap_atomic(kaddr);

		if (csum != *cb_sum) {
			btrfs_print_data_csum_error(BTRFS_I(inode),
						    disk_start, csum,
						    *cb_sum, cb->mirror_num);
			ret = -EIO;
			goto fail;
		}
		cb_sum++;
	}
	ret = 0;
fail:
	return ret;
}

/* when we finish reading compressed pages from the disk, we
 * decompress them and then run the bio end_io routines on the
 * decompressed pages (in the inode address space).
 *
 * This allows the checksumming and other IO error handling routines
 * to work normally
 *
 * The compressed pages are freed here, and it must be run
 * in process context
 */
static void end_compressed_bio_read(struct bio *bio)
{
	struct compressed_bio *cb = bio->bi_private;
	struct inode *inode;
	struct page *page;
	unsigned long index;
	int ret;

	if (bio->bi_error)
		cb->errors = 1;

	/* if there are more bios still pending for this compressed
	 * extent, just exit
	 */
	if (!atomic_dec_and_test(&cb->pending_bios))
		goto out;

	inode = cb->inode;
	ret = check_compressed_csum(inode, cb,
				    (u64)bio->bi_iter.bi_sector << 9);
	if (ret)
		goto csum_failed;

	/* ok, we're the last bio for this extent, let's start
	 * the decompression.
	 */
	ret = btrfs_decompress_bio(cb->compress_type,
				   cb->compressed_pages,
				   cb->start,
				   cb->orig_bio,
				   cb->compressed_len);
csum_failed:
	if (ret)
		cb->errors = 1;

	/* release the compressed pages */
	for (index = 0; index < cb->nr_pages; index++) {
		page = cb->compressed_pages[index];
		page->mapping = NULL;
		put_page(page);
	}

	/* do io completion on the original bio */
	if (cb->errors) {
		bio_io_error(cb->orig_bio);
	} else {
		int i;
		struct bio_vec *bvec;

		/*
		 * we have verified the checksum already, set page
		 * checked so the end_io handlers know about it
		 */
		bio_for_each_segment_all(bvec, cb->orig_bio, i)
			SetPageChecked(bvec->bv_page);

		bio_endio(cb->orig_bio);
	}

	/* finally free the cb struct */
	kfree(cb->compressed_pages);
	kfree(cb);
out:
	bio_put(bio);
}

/*
 * Clear the writeback bits on all of the file
 * pages for a compressed write
 */
static noinline void end_compressed_writeback(struct inode *inode,
					      const struct compressed_bio *cb)
{
	unsigned long index = cb->start >> PAGE_SHIFT;
	unsigned long end_index = (cb->start + cb->len - 1) >> PAGE_SHIFT;
	struct page *pages[16];
	unsigned long nr_pages = end_index - index + 1;
	int i;
	int ret;

	if (cb->errors)
		mapping_set_error(inode->i_mapping, -EIO);

	while (nr_pages > 0) {
		ret = find_get_pages_contig(inode->i_mapping, index,
				     min_t(unsigned long,
				     nr_pages, ARRAY_SIZE(pages)), pages);
		if (ret == 0) {
			nr_pages -= 1;
			index += 1;
			continue;
		}
		for (i = 0; i < ret; i++) {
			if (cb->errors)
				SetPageError(pages[i]);
			end_page_writeback(pages[i]);
			put_page(pages[i]);
		}
		nr_pages -= ret;
		index += ret;
	}
	/* the inode may be gone now */
}

/*
 * do the cleanup once all the compressed pages hit the disk.
 * This will clear writeback on the file pages and free the compressed
 * pages.
 *
 * This also calls the writeback end hooks for the file pages so that
 * metadata and checksums can be updated in the file.
 */
static void end_compressed_bio_write(struct bio *bio)
{
	struct extent_io_tree *tree;
	struct compressed_bio *cb = bio->bi_private;
	struct inode *inode;
	struct page *page;
	unsigned long index;

	if (bio->bi_error)
		cb->errors = 1;

	/* if there are more bios still pending for this compressed
	 * extent, just exit
	 */
	if (!atomic_dec_and_test(&cb->pending_bios))
		goto out;

	/* ok, we're the last bio for this extent, step one is to
	 * call back into the FS and do all the end_io operations
	 */
	inode = cb->inode;
	tree = &BTRFS_I(inode)->io_tree;
	cb->compressed_pages[0]->mapping = cb->inode->i_mapping;
	tree->ops->writepage_end_io_hook(cb->compressed_pages[0],
					 cb->start,
					 cb->start + cb->len - 1,
					 NULL,
					 bio->bi_error ? 0 : 1);
	cb->compressed_pages[0]->mapping = NULL;

	end_compressed_writeback(inode, cb);
	/* note, our inode could be gone now */

	/*
	 * release the compressed pages, these came from alloc_page and
	 * are not attached to the inode at all
	 */
	for (index = 0; index < cb->nr_pages; index++) {
		page = cb->compressed_pages[index];
		page->mapping = NULL;
		put_page(page);
	}

	/* finally free the cb struct */
	kfree(cb->compressed_pages);
	kfree(cb);
out:
	bio_put(bio);
}

/*
 * worker function to build and submit bios for previously compressed pages.
 * The corresponding pages in the inode should be marked for writeback
 * and the compressed pages should have a reference on them for dropping
 * when the IO is complete.
 *
 * This also checksums the file bytes and gets things ready for
 * the end io hooks.
 */
int btrfs_submit_compressed_write(struct inode *inode, u64 start,
				  unsigned long len, u64 disk_start,
				  unsigned long compressed_len,
				  struct page **compressed_pages,
				  unsigned long nr_pages)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct bio *bio = NULL;
	struct compressed_bio *cb;
	unsigned long bytes_left;
	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
	int pg_index = 0;
	struct page *page;
	u64 first_byte = disk_start;
	struct block_device *bdev;
	int ret;
	int skip_sum = BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM;

	WARN_ON(start & ((u64)PAGE_SIZE - 1));
	cb = kmalloc(compressed_bio_size(fs_info, compressed_len), GFP_NOFS);
	if (!cb)
		return -ENOMEM;
	atomic_set(&cb->pending_bios, 0);
	cb->errors = 0;
	cb->inode = inode;
	cb->start = start;
	cb->len = len;
	cb->mirror_num = 0;
	cb->compressed_pages = compressed_pages;
	cb->compressed_len = compressed_len;
	cb->orig_bio = NULL;
	cb->nr_pages = nr_pages;

	bdev = fs_info->fs_devices->latest_bdev;

	bio = compressed_bio_alloc(bdev, first_byte, GFP_NOFS);
	if (!bio) {
		kfree(cb);
		return -ENOMEM;
	}
	bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
	bio->bi_private = cb;
	bio->bi_end_io = end_compressed_bio_write;
	atomic_inc(&cb->pending_bios);

	/* create and submit bios for the compressed pages */
	bytes_left = compressed_len;
	for (pg_index = 0; pg_index < cb->nr_pages; pg_index++) {
		page = compressed_pages[pg_index];
		page->mapping = inode->i_mapping;
		if (bio->bi_iter.bi_size)
			ret = io_tree->ops->merge_bio_hook(page, 0,
							   PAGE_SIZE,
							   bio, 0);
		else
			ret = 0;

		page->mapping = NULL;
		if (ret || bio_add_page(bio, page, PAGE_SIZE, 0) <
		    PAGE_SIZE) {
			bio_get(bio);

			/*
			 * inc the count before we submit the bio so
			 * we know the end IO handler won't run before
			 * we inc the count.  Otherwise, the cb might get
			 * freed before we're done setting it up
			 */
			atomic_inc(&cb->pending_bios);
			ret = btrfs_bio_wq_end_io(fs_info, bio,
						  BTRFS_WQ_ENDIO_DATA);
			BUG_ON(ret); /* -ENOMEM */

			if (!skip_sum) {
				ret = btrfs_csum_one_bio(inode, bio, start, 1);
				BUG_ON(ret); /* -ENOMEM */
			}

			ret = btrfs_map_bio(fs_info, bio, 0, 1);
			if (ret) {
				bio->bi_error = ret;
				bio_endio(bio);
			}

			bio_put(bio);

			bio = compressed_bio_alloc(bdev, first_byte, GFP_NOFS);
			BUG_ON(!bio);
			bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
			bio->bi_private = cb;
			bio->bi_end_io = end_compressed_bio_write;
			bio_add_page(bio, page, PAGE_SIZE, 0);
		}
		if (bytes_left < PAGE_SIZE) {
			btrfs_info(fs_info,
					"bytes left %lu compress len %lu nr %lu",
			       bytes_left, cb->compressed_len, cb->nr_pages);
		}
		bytes_left -= PAGE_SIZE;
		first_byte += PAGE_SIZE;
		cond_resched();
	}
	bio_get(bio);

	ret = btrfs_bio_wq_end_io(fs_info, bio, BTRFS_WQ_ENDIO_DATA);
	BUG_ON(ret); /* -ENOMEM */

	if (!skip_sum) {
		ret = btrfs_csum_one_bio(inode, bio, start, 1);
		BUG_ON(ret); /* -ENOMEM */
	}

	ret = btrfs_map_bio(fs_info, bio, 0, 1);
	if (ret) {
		bio->bi_error = ret;
		bio_endio(bio);
	}

	bio_put(bio);
	return 0;
}

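/* file offset of the byte just past the last byte this bio targets */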
static u64 bio_end_offset(struct bio *bio)
{
	struct bio_vec *last = &bio->bi_io_vec[bio->bi_vcnt - 1];

	return page_offset(last->bv_page) + last->bv_len + last->bv_offset;
}

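/*
 * Readahead for a compressed read: the extent may cover more file pages
 * than the original bio asked for, so pages that are missing from the
 * page cache are allocated and appended to orig_bio rather than letting
 * the already-decompressed bytes go to waste.  We stop once several pages
 * turn out to be cached already, or as soon as a page no longer maps to
 * this compressed extent.
 */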
static noinline int add_ra_bio_pages(struct inode *inode,
				     u64 compressed_end,
				     struct compressed_bio *cb)
{
	unsigned long end_index;
	unsigned long pg_index;
	u64 last_offset;
	u64 isize = i_size_read(inode);
	int ret;
	struct page *page;
	unsigned long nr_pages = 0;
	struct extent_map *em;
	struct address_space *mapping = inode->i_mapping;
	struct extent_map_tree *em_tree;
	struct extent_io_tree *tree;
	u64 end;
	int misses = 0;

	last_offset = bio_end_offset(cb->orig_bio);
	em_tree = &BTRFS_I(inode)->extent_tree;
	tree = &BTRFS_I(inode)->io_tree;

	if (isize == 0)
		return 0;

	end_index = (i_size_read(inode) - 1) >> PAGE_SHIFT;

	while (last_offset < compressed_end) {
		pg_index = last_offset >> PAGE_SHIFT;

		if (pg_index > end_index)
			break;

		rcu_read_lock();
		page = radix_tree_lookup(&mapping->page_tree, pg_index);
		rcu_read_unlock();
		if (page && !radix_tree_exceptional_entry(page)) {
			misses++;
			if (misses > 4)
				break;
			goto next;
		}

		page = __page_cache_alloc(mapping_gfp_constraint(mapping,
								 ~__GFP_FS));
		if (!page)
			break;

		if (add_to_page_cache_lru(page, mapping, pg_index, GFP_NOFS)) {
			put_page(page);
			goto next;
		}

		end = last_offset + PAGE_SIZE - 1;
		/*
		 * at this point, we have a locked page in the page cache
		 * for these bytes in the file.  But, we have to make
		 * sure they map to this compressed extent on disk.
		 */
		set_page_extent_mapped(page);
		lock_extent(tree, last_offset, end);
		read_lock(&em_tree->lock);
		em = lookup_extent_mapping(em_tree, last_offset,
					   PAGE_SIZE);
		read_unlock(&em_tree->lock);

		if (!em || last_offset < em->start ||
		    (last_offset + PAGE_SIZE > extent_map_end(em)) ||
		    (em->block_start >> 9) != cb->orig_bio->bi_iter.bi_sector) {
			free_extent_map(em);
			unlock_extent(tree, last_offset, end);
			unlock_page(page);
			put_page(page);
			break;
		}
		free_extent_map(em);

		if (page->index == end_index) {
			char *userpage;
			size_t zero_offset = isize & (PAGE_SIZE - 1);

			if (zero_offset) {
				int zeros;
				zeros = PAGE_SIZE - zero_offset;
				userpage = kmap_atomic(page);
				memset(userpage + zero_offset, 0, zeros);
				flush_dcache_page(page);
				kunmap_atomic(userpage);
			}
		}

		ret = bio_add_page(cb->orig_bio, page,
				   PAGE_SIZE, 0);

		if (ret == PAGE_SIZE) {
			nr_pages++;
			put_page(page);
		} else {
			unlock_extent(tree, last_offset, end);
			unlock_page(page);
			put_page(page);
			break;
		}
next:
		last_offset += PAGE_SIZE;
	}
	return 0;
}

/*
 * for a compressed read, the bio we get passed has all the inode pages
 * in it.  We don't actually do IO on those pages but allocate new ones
 * to hold the compressed pages on disk.
 *
 * bio->bi_iter.bi_sector points to the compressed extent on disk
 * bio->bi_io_vec points to all of the inode pages
 *
 * After the compressed pages are read, we copy the bytes into the
 * bio we were passed and then call the bio end_io calls
 */
int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
				 int mirror_num, unsigned long bio_flags)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct extent_io_tree *tree;
	struct extent_map_tree *em_tree;
	struct compressed_bio *cb;
	unsigned long compressed_len;
	unsigned long nr_pages;
	unsigned long pg_index;
	struct page *page;
	struct block_device *bdev;
	struct bio *comp_bio;
	u64 cur_disk_byte = (u64)bio->bi_iter.bi_sector << 9;
	u64 em_len;
	u64 em_start;
	struct extent_map *em;
	int ret = -ENOMEM;
	int faili = 0;
	u32 *sums;

	tree = &BTRFS_I(inode)->io_tree;
	em_tree = &BTRFS_I(inode)->extent_tree;

	/* we need the actual starting offset of this extent in the file */
	read_lock(&em_tree->lock);
	em = lookup_extent_mapping(em_tree,
				   page_offset(bio->bi_io_vec->bv_page),
				   PAGE_SIZE);
	read_unlock(&em_tree->lock);
	if (!em)
		return -EIO;

	compressed_len = em->block_len;
	cb = kmalloc(compressed_bio_size(fs_info, compressed_len), GFP_NOFS);
	if (!cb)
		goto out;

	atomic_set(&cb->pending_bios, 0);
	cb->errors = 0;
	cb->inode = inode;
	cb->mirror_num = mirror_num;
	sums = &cb->sums;

	cb->start = em->orig_start;
	em_len = em->len;
	em_start = em->start;

	free_extent_map(em);
	em = NULL;

	cb->len = bio->bi_iter.bi_size;
	cb->compressed_len = compressed_len;
	cb->compress_type = extent_compress_type(bio_flags);
	cb->orig_bio = bio;

	nr_pages = DIV_ROUND_UP(compressed_len, PAGE_SIZE);
	cb->compressed_pages = kcalloc(nr_pages, sizeof(struct page *),
				       GFP_NOFS);
	if (!cb->compressed_pages)
		goto fail1;

	bdev = fs_info->fs_devices->latest_bdev;

	for (pg_index = 0; pg_index < nr_pages; pg_index++) {
		cb->compressed_pages[pg_index] = alloc_page(GFP_NOFS |
							    __GFP_HIGHMEM);
		if (!cb->compressed_pages[pg_index]) {
			faili = pg_index - 1;
			ret = -ENOMEM;
			goto fail2;
		}
	}
	faili = nr_pages - 1;
	cb->nr_pages = nr_pages;

	add_ra_bio_pages(inode, em_start + em_len, cb);

	/* include any pages we added in add_ra_bio_pages */
	cb->len = bio->bi_iter.bi_size;

	comp_bio = compressed_bio_alloc(bdev, cur_disk_byte, GFP_NOFS);
	if (!comp_bio)
		goto fail2;
	bio_set_op_attrs(comp_bio, REQ_OP_READ, 0);
	comp_bio->bi_private = cb;
	comp_bio->bi_end_io = end_compressed_bio_read;
	atomic_inc(&cb->pending_bios);

	for (pg_index = 0; pg_index < nr_pages; pg_index++) {
		page = cb->compressed_pages[pg_index];
		page->mapping = inode->i_mapping;
		page->index = em_start >> PAGE_SHIFT;

		if (comp_bio->bi_iter.bi_size)
			ret = tree->ops->merge_bio_hook(page, 0,
							PAGE_SIZE,
							comp_bio, 0);
		else
			ret = 0;

		page->mapping = NULL;
		if (ret || bio_add_page(comp_bio, page, PAGE_SIZE, 0) <
		    PAGE_SIZE) {
			bio_get(comp_bio);

			ret = btrfs_bio_wq_end_io(fs_info, comp_bio,
						  BTRFS_WQ_ENDIO_DATA);
			BUG_ON(ret); /* -ENOMEM */

			/*
			 * inc the count before we submit the bio so
			 * we know the end IO handler won't run before
			 * we inc the count.  Otherwise, the cb might get
			 * freed before we're done setting it up
			 */
			atomic_inc(&cb->pending_bios);

			if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)) {
				ret = btrfs_lookup_bio_sums(inode, comp_bio,
							    sums);
				BUG_ON(ret); /* -ENOMEM */
			}
			sums += DIV_ROUND_UP(comp_bio->bi_iter.bi_size,
					     fs_info->sectorsize);

			ret = btrfs_map_bio(fs_info, comp_bio, mirror_num, 0);
			if (ret) {
				comp_bio->bi_error = ret;
				bio_endio(comp_bio);
			}

			bio_put(comp_bio);

			comp_bio = compressed_bio_alloc(bdev, cur_disk_byte,
							GFP_NOFS);
			BUG_ON(!comp_bio);
			bio_set_op_attrs(comp_bio, REQ_OP_READ, 0);
			comp_bio->bi_private = cb;
			comp_bio->bi_end_io = end_compressed_bio_read;

			bio_add_page(comp_bio, page, PAGE_SIZE, 0);
		}
		cur_disk_byte += PAGE_SIZE;
	}
	bio_get(comp_bio);

	ret = btrfs_bio_wq_end_io(fs_info, comp_bio, BTRFS_WQ_ENDIO_DATA);
	BUG_ON(ret); /* -ENOMEM */

	if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)) {
		ret = btrfs_lookup_bio_sums(inode, comp_bio, sums);
		BUG_ON(ret); /* -ENOMEM */
	}

	ret = btrfs_map_bio(fs_info, comp_bio, mirror_num, 0);
	if (ret) {
		comp_bio->bi_error = ret;
		bio_endio(comp_bio);
	}

	bio_put(comp_bio);
	return 0;

fail2:
	while (faili >= 0) {
		__free_page(cb->compressed_pages[faili]);
		faili--;
	}

	kfree(cb->compressed_pages);
fail1:
	kfree(cb);
out:
	free_extent_map(em);
	return ret;
}

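/*
 * Cache of allocated compression workspaces, one cache per compression
 * type.  Workspaces are recycled rather than allocated for every bio;
 * the number of idle ones kept around is bounded by num_online_cpus()
 * (see find_workspace/free_workspace below).
 */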
static struct {
	struct list_head idle_ws;
	spinlock_t ws_lock;
	/* Number of free workspaces */
	int free_ws;
	/* Total number of allocated workspaces */
	atomic_t total_ws;
	/* Waiters for a free workspace */
	wait_queue_head_t ws_wait;
} btrfs_comp_ws[BTRFS_COMPRESS_TYPES];

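/* indexed by compress_type - 1: BTRFS_COMPRESS_NONE has no entry here */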
static const struct btrfs_compress_op * const btrfs_compress_op[] = {
	&btrfs_zlib_compress,
	&btrfs_lzo_compress,
};

void __init btrfs_init_compress(void)
{
	int i;

	for (i = 0; i < BTRFS_COMPRESS_TYPES; i++) {
		struct list_head *workspace;

		INIT_LIST_HEAD(&btrfs_comp_ws[i].idle_ws);
		spin_lock_init(&btrfs_comp_ws[i].ws_lock);
		atomic_set(&btrfs_comp_ws[i].total_ws, 0);
		init_waitqueue_head(&btrfs_comp_ws[i].ws_wait);

		/*
		 * Preallocate one workspace for each compression type so
		 * we can guarantee forward progress in the worst case
		 */
		workspace = btrfs_compress_op[i]->alloc_workspace();
		if (IS_ERR(workspace)) {
			pr_warn("BTRFS: cannot preallocate compression workspace, will try later\n");
		} else {
			atomic_set(&btrfs_comp_ws[i].total_ws, 1);
			btrfs_comp_ws[i].free_ws = 1;
			list_add(workspace, &btrfs_comp_ws[i].idle_ws);
		}
	}
}

/*
 * This finds an available workspace or allocates a new one.
 * If it's not possible to allocate a new one, waits until there's one.
 * Preallocation provides a forward progress guarantee, so we do not
 * return errors.
 */
static struct list_head *find_workspace(int type)
{
	struct list_head *workspace;
	int cpus = num_online_cpus();
	int idx = type - 1;

	struct list_head *idle_ws = &btrfs_comp_ws[idx].idle_ws;
	spinlock_t *ws_lock = &btrfs_comp_ws[idx].ws_lock;
	atomic_t *total_ws = &btrfs_comp_ws[idx].total_ws;
	wait_queue_head_t *ws_wait = &btrfs_comp_ws[idx].ws_wait;
	int *free_ws = &btrfs_comp_ws[idx].free_ws;
again:
	spin_lock(ws_lock);
	if (!list_empty(idle_ws)) {
		workspace = idle_ws->next;
		list_del(workspace);
		(*free_ws)--;
		spin_unlock(ws_lock);
		return workspace;
	}
	if (atomic_read(total_ws) > cpus) {
		DEFINE_WAIT(wait);

		spin_unlock(ws_lock);
		prepare_to_wait(ws_wait, &wait, TASK_UNINTERRUPTIBLE);
		if (atomic_read(total_ws) > cpus && !*free_ws)
			schedule();
		finish_wait(ws_wait, &wait);
		goto again;
	}
	atomic_inc(total_ws);
	spin_unlock(ws_lock);

	workspace = btrfs_compress_op[idx]->alloc_workspace();
	if (IS_ERR(workspace)) {
		atomic_dec(total_ws);
		wake_up(ws_wait);

		/*
		 * Do not return the error but go back to waiting.  There's a
		 * workspace preallocated for each type and the compression
		 * time is bounded so we get to a workspace eventually.  This
		 * makes our caller's life easier.
		 *
		 * To prevent silent and low-probability deadlocks (when the
		 * initial preallocation fails), check if there are any
		 * workspaces at all.
		 */
		if (atomic_read(total_ws) == 0) {
			static DEFINE_RATELIMIT_STATE(_rs,
					/* once per minute */ 60 * HZ,
					/* no burst */ 1);

			if (__ratelimit(&_rs)) {
				pr_warn("BTRFS: no compression workspaces, low memory, retrying\n");
			}
		}
		goto again;
	}
	return workspace;
}

/*
 * put a workspace struct back on the list or free it if we have enough
 * idle ones sitting around
 */
static void free_workspace(int type, struct list_head *workspace)
{
	int idx = type - 1;
	struct list_head *idle_ws = &btrfs_comp_ws[idx].idle_ws;
	spinlock_t *ws_lock = &btrfs_comp_ws[idx].ws_lock;
	atomic_t *total_ws = &btrfs_comp_ws[idx].total_ws;
	wait_queue_head_t *ws_wait = &btrfs_comp_ws[idx].ws_wait;
	int *free_ws = &btrfs_comp_ws[idx].free_ws;

	spin_lock(ws_lock);
	if (*free_ws < num_online_cpus()) {
		list_add(workspace, idle_ws);
		(*free_ws)++;
		spin_unlock(ws_lock);
		goto wake;
	}
	spin_unlock(ws_lock);

	btrfs_compress_op[idx]->free_workspace(workspace);
	atomic_dec(total_ws);
wake:
	/*
	 * Make sure counter is updated before we wake up waiters.
	 */
	smp_mb();
	if (waitqueue_active(ws_wait))
		wake_up(ws_wait);
}

/*
 * cleanup function for module exit
 */
static void free_workspaces(void)
{
	struct list_head *workspace;
	int i;

	for (i = 0; i < BTRFS_COMPRESS_TYPES; i++) {
		while (!list_empty(&btrfs_comp_ws[i].idle_ws)) {
			workspace = btrfs_comp_ws[i].idle_ws.next;
			list_del(workspace);
			btrfs_compress_op[i]->free_workspace(workspace);
			atomic_dec(&btrfs_comp_ws[i].total_ws);
		}
	}
}

/*
 * given an address space and start/len, compress the bytes.
 *
 * pages are allocated to hold the compressed result and stored
 * in 'pages'
 *
 * out_pages is used to return the number of pages allocated.  There
 * may be pages allocated even if we return an error
 *
 * total_in is used to return the number of bytes actually read.  It
 * may be smaller than len if we had to exit early because we
 * ran out of room in the pages array or because we crossed the
 * max_out threshold.
 *
 * total_out is used to return the total number of compressed bytes
 *
 * max_out tells us the max number of bytes that we're allowed to
 * stuff into pages
 */
int btrfs_compress_pages(int type, struct address_space *mapping,
			 u64 start, unsigned long len,
			 struct page **pages,
			 unsigned long nr_dest_pages,
			 unsigned long *out_pages,
			 unsigned long *total_in,
			 unsigned long *total_out,
			 unsigned long max_out)
{
	struct list_head *workspace;
	int ret;

	workspace = find_workspace(type);

	ret = btrfs_compress_op[type-1]->compress_pages(workspace, mapping,
						      start, len, pages,
						      nr_dest_pages, out_pages,
						      total_in, total_out,
						      max_out);
	free_workspace(type, workspace);
	return ret;
}

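/*
 * Illustrative call (a sketch, not code from this file): a writeback path
 * that will accept at most 128K of compressed output could do
 *
 *	ret = btrfs_compress_pages(BTRFS_COMPRESS_ZLIB, inode->i_mapping,
 *				   start, len, pages, nr_pages,
 *				   &nr_pages_ret, &total_in,
 *				   &total_compressed, 128 * 1024);
 *
 * and treat the range as incompressible when total_compressed comes back
 * no smaller than total_in.
 */
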
/*
 * pages_in is an array of pages with compressed data.
 *
 * disk_start is the starting logical offset of this array in the file
 *
 * orig_bio contains the pages from the file that we want to decompress into
 *
 * srclen is the number of bytes in pages_in
 *
 * The basic idea is that we have a bio that was created by readpages.
 * The pages in the bio are for the uncompressed data, and they may not
 * be contiguous.  They all correspond to the range of bytes covered by
 * the compressed extent.
 */
static int btrfs_decompress_bio(int type, struct page **pages_in,
				u64 disk_start, struct bio *orig_bio,
				size_t srclen)
{
	struct list_head *workspace;
	int ret;

	workspace = find_workspace(type);

	ret = btrfs_compress_op[type-1]->decompress_bio(workspace, pages_in,
							disk_start, orig_bio,
							srclen);
	free_workspace(type, workspace);
	return ret;
}

/*
 * a less complex decompression routine.  Our compressed data fits in a
 * single page, and we want to read a single page out of it.
 * start_byte tells us the offset into the compressed data we're interested in
 */
int btrfs_decompress(int type, unsigned char *data_in, struct page *dest_page,
		     unsigned long start_byte, size_t srclen, size_t destlen)
{
	struct list_head *workspace;
	int ret;

	workspace = find_workspace(type);

	ret = btrfs_compress_op[type-1]->decompress(workspace, data_in,
						    dest_page, start_byte,
						    srclen, destlen);

	free_workspace(type, workspace);
	return ret;
}

void btrfs_exit_compress(void)
{
	free_workspaces();
}

/*
 * Copy uncompressed data from working buffer to pages.
 *
 * buf_start is the byte offset of the start of the working buffer within
 * the uncompressed data.
 *
 * total_out is the offset just past the last byte of data in the working
 * buffer, again counted in uncompressed bytes.
 */
int btrfs_decompress_buf2page(char *buf, unsigned long buf_start,
			      unsigned long total_out, u64 disk_start,
			      struct bio *bio)
{
	unsigned long buf_offset;
	unsigned long current_buf_start;
	unsigned long start_byte;
	unsigned long prev_start_byte;
	unsigned long working_bytes = total_out - buf_start;
	unsigned long bytes;
	char *kaddr;
	struct bio_vec bvec = bio_iter_iovec(bio, bio->bi_iter);

	/*
	 * start byte is the first byte of the page we're currently
	 * copying into, relative to the start of the uncompressed data.
	 */
	start_byte = page_offset(bvec.bv_page) - disk_start;

	/* we haven't yet hit data corresponding to this page */
	if (total_out <= start_byte)
		return 1;

	/*
	 * the start of the data we care about is offset into
	 * the middle of our working buffer
	 */
	if (total_out > start_byte && buf_start < start_byte) {
		buf_offset = start_byte - buf_start;
		working_bytes -= buf_offset;
	} else {
		buf_offset = 0;
	}
	current_buf_start = buf_start;

	/* copy bytes from the working buffer into the pages */
	while (working_bytes > 0) {
		bytes = min_t(unsigned long, bvec.bv_len,
				PAGE_SIZE - buf_offset);
		bytes = min(bytes, working_bytes);

		kaddr = kmap_atomic(bvec.bv_page);
		memcpy(kaddr + bvec.bv_offset, buf + buf_offset, bytes);
		kunmap_atomic(kaddr);
		flush_dcache_page(bvec.bv_page);

		buf_offset += bytes;
		working_bytes -= bytes;
		current_buf_start += bytes;

		/* check if we need to pick another page */
		bio_advance(bio, bytes);
		if (!bio->bi_iter.bi_size)
			return 0;
		bvec = bio_iter_iovec(bio, bio->bi_iter);
		prev_start_byte = start_byte;
		start_byte = page_offset(bvec.bv_page) - disk_start;

		/*
		 * We need to make sure we're only adjusting
		 * our offset into the compression working buffer when
		 * we're switching pages.  Otherwise we can incorrectly
		 * keep copying when we were actually done.
		 */
		if (start_byte != prev_start_byte) {
			/*
			 * make sure our new page is covered by this
			 * working buffer
			 */
			if (total_out <= start_byte)
				return 1;

			/*
			 * the next page in the biovec might not be adjacent
			 * to the last page, but it might still be found
			 * inside this working buffer.  bump our offset pointer
			 */
			if (total_out > start_byte &&
			    current_buf_start < start_byte) {
				buf_offset = start_byte - buf_start;
				working_bytes = total_out - start_byte;
				current_buf_start = buf_start + buf_offset;
			}
		}
	}

	return 1;
}