/*
 * Copyright (C) 2011, 2012 STRATO.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */

#include <linux/blkdev.h>
#include <linux/ratelimit.h>
#include "ctree.h"
#include "volumes.h"
#include "disk-io.h"
#include "ordered-data.h"
#include "transaction.h"
#include "backref.h"
#include "extent_io.h"
#include "dev-replace.h"
#include "check-integrity.h"
#include "rcu-string.h"
#include "raid56.h"

/*
 * This is only the first step towards a full-featured scrub. It reads all
 * extents and super blocks and verifies the checksums. In case a bad checksum
 * is found or the extent cannot be read, good data will be written back if
 * any can be found.
 *
 * Future enhancements:
 *  - In case an unrepairable extent is encountered, track which files are
 *    affected and report them
 *  - track and record media errors, throw out bad devices
 *  - add a mode to also read unallocated space
 */

struct scrub_block;
struct scrub_ctx;

/*
 * the following three values only influence the performance.
 * The last one configures the number of parallel and outstanding I/O
 * operations. The first two values configure an upper limit for the number
 * of (dynamically allocated) pages that are added to a bio.
 */
#define SCRUB_PAGES_PER_RD_BIO	32	/* 128k per bio */
#define SCRUB_PAGES_PER_WR_BIO	32	/* 128k per bio */
#define SCRUB_BIOS_PER_SCTX	64	/* 8MB per device in flight */
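
/*
 * A quick sanity check of the limits above (assuming a 4KiB PAGE_SIZE,
 * which is what the "128k"/"8MB" comments presume): 32 pages * 4KiB =
 * 128KiB per bio, and 64 bios * 128KiB = 8MiB of outstanding I/O per
 * device.
 */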

/*
 * the following value times PAGE_SIZE needs to be large enough to match the
 * largest node/leaf/sector size that shall be supported.
 * Values larger than BTRFS_STRIPE_LEN are not supported.
 */
#define SCRUB_MAX_PAGES_PER_BLOCK	16	/* 64k per node/leaf/sector */

struct scrub_recover {
	refcount_t		refs;
	struct btrfs_bio	*bbio;
	u64			map_length;
};

struct scrub_page {
	struct scrub_block	*sblock;
	struct page		*page;
	struct btrfs_device	*dev;
	struct list_head	list;
	u64			flags;  /* extent flags */
	u64			generation;
	u64			logical;
	u64			physical;
	u64			physical_for_dev_replace;
	atomic_t		refs;
	struct {
		unsigned int	mirror_num:8;
		unsigned int	have_csum:1;
		unsigned int	io_error:1;
	};
	u8			csum[BTRFS_CSUM_SIZE];

	struct scrub_recover	*recover;
};

struct scrub_bio {
	int			index;
	struct scrub_ctx	*sctx;
	struct btrfs_device	*dev;
	struct bio		*bio;
	int			err;
	u64			logical;
	u64			physical;
#if SCRUB_PAGES_PER_WR_BIO >= SCRUB_PAGES_PER_RD_BIO
	struct scrub_page	*pagev[SCRUB_PAGES_PER_WR_BIO];
#else
	struct scrub_page	*pagev[SCRUB_PAGES_PER_RD_BIO];
#endif
	int			page_count;
	int			next_free;
	struct btrfs_work	work;
};

struct scrub_block {
	struct scrub_page	*pagev[SCRUB_MAX_PAGES_PER_BLOCK];
	int			page_count;
	atomic_t		outstanding_pages;
	refcount_t		refs; /* free mem on transition to zero */
	struct scrub_ctx	*sctx;
	struct scrub_parity	*sparity;
	struct {
		unsigned int	header_error:1;
		unsigned int	checksum_error:1;
		unsigned int	no_io_error_seen:1;
		unsigned int	generation_error:1; /* also sets header_error */

		/* The following is for the data used to check parity */
		/* It is for the data with checksum */
		unsigned int	data_corrected:1;
	};
	struct btrfs_work	work;
};

/* Used for the chunks with parity stripes, such as RAID5/6 */
struct scrub_parity {
	struct scrub_ctx	*sctx;

	struct btrfs_device	*scrub_dev;

	u64			logic_start;

	u64			logic_end;

	int			nsectors;

	u64			stripe_len;

	refcount_t		refs;

	struct list_head	spages;

	/* Work of parity check and repair */
	struct btrfs_work	work;

	/* Mark the parity blocks which have data */
	unsigned long		*dbitmap;

	/*
	 * Mark the parity blocks which have data, but errors happened when
	 * reading or checking that data
	 */
	unsigned long		*ebitmap;

	unsigned long		bitmap[0];
};
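
/*
 * Note: dbitmap and ebitmap above are assumed to point into the trailing
 * bitmap[] storage, i.e. a single allocation sized for two bitmaps backs
 * both pointers. The allocation site is not part of this hunk, so treat
 * this as an assumption about the setup code rather than a guarantee.
 */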

struct scrub_wr_ctx {
	struct scrub_bio *wr_curr_bio;
	struct btrfs_device *tgtdev;
	int pages_per_wr_bio; /* <= SCRUB_PAGES_PER_WR_BIO */
	atomic_t flush_all_writes;
	struct mutex wr_lock;
};

struct scrub_ctx {
	struct scrub_bio	*bios[SCRUB_BIOS_PER_SCTX];
	struct btrfs_fs_info	*fs_info;
	int			first_free;
	int			curr;
	atomic_t		bios_in_flight;
	atomic_t		workers_pending;
	spinlock_t		list_lock;
	wait_queue_head_t	list_wait;
	u16			csum_size;
	struct list_head	csum_list;
	atomic_t		cancel_req;
	int			readonly;
	int			pages_per_rd_bio;
	u32			sectorsize;
	u32			nodesize;

	int			is_dev_replace;
	struct scrub_wr_ctx	wr_ctx;

	/*
	 * statistics
	 */
	struct btrfs_scrub_progress stat;
	spinlock_t		stat_lock;

	/*
	 * Use a ref counter to avoid use-after-free issues. Scrub workers
	 * decrement bios_in_flight and workers_pending and then do a wakeup
	 * on the list_wait wait queue. We must ensure the main scrub task
	 * doesn't free the scrub context before or while the workers are
	 * doing the wakeup() call.
	 */
	refcount_t		refs;
};

struct scrub_fixup_nodatasum {
	struct scrub_ctx	*sctx;
	struct btrfs_device	*dev;
	u64			logical;
	struct btrfs_root	*root;
	struct btrfs_work	work;
	int			mirror_num;
};

struct scrub_nocow_inode {
	u64			inum;
	u64			offset;
	u64			root;
	struct list_head	list;
};

struct scrub_copy_nocow_ctx {
	struct scrub_ctx	*sctx;
	u64			logical;
	u64			len;
	int			mirror_num;
	u64			physical_for_dev_replace;
	struct list_head	inodes;
	struct btrfs_work	work;
};

struct scrub_warning {
	struct btrfs_path	*path;
	u64			extent_item_size;
	const char		*errstr;
	sector_t		sector;
	u64			logical;
	struct btrfs_device	*dev;
};

struct full_stripe_lock {
	struct rb_node node;
	u64 logical;
	u64 refs;
	struct mutex mutex;
};

static void scrub_pending_bio_inc(struct scrub_ctx *sctx);
static void scrub_pending_bio_dec(struct scrub_ctx *sctx);
static void scrub_pending_trans_workers_inc(struct scrub_ctx *sctx);
static void scrub_pending_trans_workers_dec(struct scrub_ctx *sctx);
static int scrub_handle_errored_block(struct scrub_block *sblock_to_check);
static int scrub_setup_recheck_block(struct scrub_block *original_sblock,
				     struct scrub_block *sblocks_for_recheck);
static void scrub_recheck_block(struct btrfs_fs_info *fs_info,
				struct scrub_block *sblock,
				int retry_failed_mirror);
static void scrub_recheck_block_checksum(struct scrub_block *sblock);
static int scrub_repair_block_from_good_copy(struct scrub_block *sblock_bad,
					     struct scrub_block *sblock_good);
static int scrub_repair_page_from_good_copy(struct scrub_block *sblock_bad,
					    struct scrub_block *sblock_good,
					    int page_num, int force_write);
static void scrub_write_block_to_dev_replace(struct scrub_block *sblock);
static int scrub_write_page_to_dev_replace(struct scrub_block *sblock,
					   int page_num);
static int scrub_checksum_data(struct scrub_block *sblock);
static int scrub_checksum_tree_block(struct scrub_block *sblock);
static int scrub_checksum_super(struct scrub_block *sblock);
static void scrub_block_get(struct scrub_block *sblock);
static void scrub_block_put(struct scrub_block *sblock);
static void scrub_page_get(struct scrub_page *spage);
static void scrub_page_put(struct scrub_page *spage);
static void scrub_parity_get(struct scrub_parity *sparity);
static void scrub_parity_put(struct scrub_parity *sparity);
static int scrub_add_page_to_rd_bio(struct scrub_ctx *sctx,
				    struct scrub_page *spage);
static int scrub_pages(struct scrub_ctx *sctx, u64 logical, u64 len,
		       u64 physical, struct btrfs_device *dev, u64 flags,
		       u64 gen, int mirror_num, u8 *csum, int force,
		       u64 physical_for_dev_replace);
static void scrub_bio_end_io(struct bio *bio);
static void scrub_bio_end_io_worker(struct btrfs_work *work);
static void scrub_block_complete(struct scrub_block *sblock);
static void scrub_remap_extent(struct btrfs_fs_info *fs_info,
			       u64 extent_logical, u64 extent_len,
			       u64 *extent_physical,
			       struct btrfs_device **extent_dev,
			       int *extent_mirror_num);
static int scrub_add_page_to_wr_bio(struct scrub_ctx *sctx,
				    struct scrub_page *spage);
static void scrub_wr_submit(struct scrub_ctx *sctx);
static void scrub_wr_bio_end_io(struct bio *bio);
static void scrub_wr_bio_end_io_worker(struct btrfs_work *work);
static int write_page_nocow(struct scrub_ctx *sctx,
			    u64 physical_for_dev_replace, struct page *page);
static int copy_nocow_pages_for_inode(u64 inum, u64 offset, u64 root,
				      struct scrub_copy_nocow_ctx *ctx);
static int copy_nocow_pages(struct scrub_ctx *sctx, u64 logical, u64 len,
			    int mirror_num, u64 physical_for_dev_replace);
static void copy_nocow_pages_worker(struct btrfs_work *work);
static void __scrub_blocked_if_needed(struct btrfs_fs_info *fs_info);
static void scrub_blocked_if_needed(struct btrfs_fs_info *fs_info);
static void scrub_put_ctx(struct scrub_ctx *sctx);


static void scrub_pending_bio_inc(struct scrub_ctx *sctx)
{
	refcount_inc(&sctx->refs);
	atomic_inc(&sctx->bios_in_flight);
}

static void scrub_pending_bio_dec(struct scrub_ctx *sctx)
{
	atomic_dec(&sctx->bios_in_flight);
	wake_up(&sctx->list_wait);
	scrub_put_ctx(sctx);
}

static void __scrub_blocked_if_needed(struct btrfs_fs_info *fs_info)
{
	while (atomic_read(&fs_info->scrub_pause_req)) {
		mutex_unlock(&fs_info->scrub_lock);
		wait_event(fs_info->scrub_pause_wait,
		   atomic_read(&fs_info->scrub_pause_req) == 0);
		mutex_lock(&fs_info->scrub_lock);
	}
}

static void scrub_pause_on(struct btrfs_fs_info *fs_info)
{
	atomic_inc(&fs_info->scrubs_paused);
	wake_up(&fs_info->scrub_pause_wait);
}

static void scrub_pause_off(struct btrfs_fs_info *fs_info)
{
	mutex_lock(&fs_info->scrub_lock);
	__scrub_blocked_if_needed(fs_info);
	atomic_dec(&fs_info->scrubs_paused);
	mutex_unlock(&fs_info->scrub_lock);

	wake_up(&fs_info->scrub_pause_wait);
}

static void scrub_blocked_if_needed(struct btrfs_fs_info *fs_info)
{
	scrub_pause_on(fs_info);
	scrub_pause_off(fs_info);
}

/*
 * Insert new full stripe lock into full stripe locks tree
 *
 * Return pointer to existing or newly inserted full_stripe_lock structure if
 * everything works well.
 * Return ERR_PTR(-ENOMEM) if we failed to allocate memory
 *
 * NOTE: caller must hold full_stripe_locks_root->lock before calling this
 * function
 */
static struct full_stripe_lock *insert_full_stripe_lock(
		struct btrfs_full_stripe_locks_tree *locks_root,
		u64 fstripe_logical)
{
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct full_stripe_lock *entry;
	struct full_stripe_lock *ret;

	WARN_ON(!mutex_is_locked(&locks_root->lock));

	p = &locks_root->root.rb_node;
	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct full_stripe_lock, node);
		if (fstripe_logical < entry->logical) {
			p = &(*p)->rb_left;
		} else if (fstripe_logical > entry->logical) {
			p = &(*p)->rb_right;
		} else {
			entry->refs++;
			return entry;
		}
	}

	/* Insert new lock */
	ret = kmalloc(sizeof(*ret), GFP_KERNEL);
	if (!ret)
		return ERR_PTR(-ENOMEM);
	ret->logical = fstripe_logical;
	ret->refs = 1;
	mutex_init(&ret->mutex);

	rb_link_node(&ret->node, parent, p);
	rb_insert_color(&ret->node, &locks_root->root);
	return ret;
}

/*
 * Search for a full stripe lock of a block group
 *
 * Return pointer to existing full stripe lock if found
 * Return NULL if not found
 */
static struct full_stripe_lock *search_full_stripe_lock(
		struct btrfs_full_stripe_locks_tree *locks_root,
		u64 fstripe_logical)
{
	struct rb_node *node;
	struct full_stripe_lock *entry;

	WARN_ON(!mutex_is_locked(&locks_root->lock));

	node = locks_root->root.rb_node;
	while (node) {
		entry = rb_entry(node, struct full_stripe_lock, node);
		if (fstripe_logical < entry->logical)
			node = node->rb_left;
		else if (fstripe_logical > entry->logical)
			node = node->rb_right;
		else
			return entry;
	}
	return NULL;
}

/*
 * Helper to get full stripe logical from a normal bytenr.
 *
 * Caller must ensure @cache is a RAID56 block group.
 */
static u64 get_full_stripe_logical(struct btrfs_block_group_cache *cache,
				   u64 bytenr)
{
	u64 ret;

	/*
	 * Due to chunk item size limit, full stripe length should not be
	 * larger than U32_MAX. Just a sanity check here.
	 */
	WARN_ON_ONCE(cache->full_stripe_len >= U32_MAX);

	/*
	 * round_down() can only handle power of 2, while RAID56 full
	 * stripe length can be 64KiB * n, so we need to manually round down.
	 */
	ret = div64_u64(bytenr - cache->key.objectid, cache->full_stripe_len) *
	      cache->full_stripe_len + cache->key.objectid;
	return ret;
}
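
/*
 * A worked example of the manual rounding above, with illustrative
 * numbers (not taken from any real chunk layout): assume a RAID5 chunk
 * with 3 data stripes, so full_stripe_len = 3 * 64KiB = 192KiB, and a
 * block group starting at key.objectid = 1MiB. For bytenr = 1MiB + 200KiB,
 * div64_u64(200KiB, 192KiB) = 1, so the returned full stripe start is
 * 1MiB + 1 * 192KiB = 1MiB + 192KiB.
 */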

/*
 * Lock a full stripe to avoid concurrency of recovery and read
 *
 * It's only used for profiles with parities (RAID5/6), for other profiles it
 * does nothing.
 *
 * Return 0 if we locked full stripe covering @bytenr, with a mutex held.
 * So caller must call unlock_full_stripe() in the same context.
 *
 * Return <0 if we encounter an error.
 */
static int lock_full_stripe(struct btrfs_fs_info *fs_info, u64 bytenr,
			    bool *locked_ret)
{
	struct btrfs_block_group_cache *bg_cache;
	struct btrfs_full_stripe_locks_tree *locks_root;
	struct full_stripe_lock *existing;
	u64 fstripe_start;
	int ret = 0;

	*locked_ret = false;
	bg_cache = btrfs_lookup_block_group(fs_info, bytenr);
	if (!bg_cache) {
		ASSERT(0);
		return -ENOENT;
	}

	/* Profiles not based on parity don't need full stripe lock */
	if (!(bg_cache->flags & BTRFS_BLOCK_GROUP_RAID56_MASK))
		goto out;
	locks_root = &bg_cache->full_stripe_locks_root;

	fstripe_start = get_full_stripe_logical(bg_cache, bytenr);

	/* Now insert the full stripe lock */
	mutex_lock(&locks_root->lock);
	existing = insert_full_stripe_lock(locks_root, fstripe_start);
	mutex_unlock(&locks_root->lock);
	if (IS_ERR(existing)) {
		ret = PTR_ERR(existing);
		goto out;
	}
	mutex_lock(&existing->mutex);
	*locked_ret = true;
out:
	btrfs_put_block_group(bg_cache);
	return ret;
}

/*
 * Unlock a full stripe.
 *
 * NOTE: Caller must ensure it's the same context that called the
 * corresponding lock_full_stripe().
 *
 * Return 0 if we unlock full stripe without problem.
 * Return <0 for error
 */
static int unlock_full_stripe(struct btrfs_fs_info *fs_info, u64 bytenr,
			      bool locked)
{
	struct btrfs_block_group_cache *bg_cache;
	struct btrfs_full_stripe_locks_tree *locks_root;
	struct full_stripe_lock *fstripe_lock;
	u64 fstripe_start;
	bool freeit = false;
	int ret = 0;

	/* If we didn't acquire full stripe lock, no need to continue */
	if (!locked)
		return 0;

	bg_cache = btrfs_lookup_block_group(fs_info, bytenr);
	if (!bg_cache) {
		ASSERT(0);
		return -ENOENT;
	}
	if (!(bg_cache->flags & BTRFS_BLOCK_GROUP_RAID56_MASK))
		goto out;

	locks_root = &bg_cache->full_stripe_locks_root;
	fstripe_start = get_full_stripe_logical(bg_cache, bytenr);

	mutex_lock(&locks_root->lock);
	fstripe_lock = search_full_stripe_lock(locks_root, fstripe_start);
	/* Unpaired unlock_full_stripe() detected */
	if (!fstripe_lock) {
		WARN_ON(1);
		ret = -ENOENT;
		mutex_unlock(&locks_root->lock);
		goto out;
	}

	if (fstripe_lock->refs == 0) {
		WARN_ON(1);
		btrfs_warn(fs_info, "full stripe lock at %llu refcount underflow",
			fstripe_lock->logical);
	} else {
		fstripe_lock->refs--;
	}

	if (fstripe_lock->refs == 0) {
		rb_erase(&fstripe_lock->node, &locks_root->root);
		freeit = true;
	}
	mutex_unlock(&locks_root->lock);

	mutex_unlock(&fstripe_lock->mutex);
	if (freeit)
		kfree(fstripe_lock);
out:
	btrfs_put_block_group(bg_cache);
	return ret;
}
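
/*
 * A minimal usage sketch for the pair above (scrub_handle_errored_block()
 * below starts this way; error handling trimmed, and the unlock half is
 * assumed to sit at the end of that function, outside this hunk):
 *
 *	bool locked;
 *	int ret;
 *
 *	ret = lock_full_stripe(fs_info, logical, &locked);
 *	if (ret < 0)
 *		return ret;
 *	... recheck/repair the errored block ...
 *	ret = unlock_full_stripe(fs_info, logical, locked);
 *
 * On block groups without parity both calls are cheap no-ops and @locked
 * stays false, so the pattern is safe for any profile.
 */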

/*
 * used for workers that require transaction commits (i.e., for the
 * NOCOW case)
 */
static void scrub_pending_trans_workers_inc(struct scrub_ctx *sctx)
{
	struct btrfs_fs_info *fs_info = sctx->fs_info;

	refcount_inc(&sctx->refs);
	/*
	 * increment scrubs_running to prevent cancel requests from
	 * completing as long as a worker is running. we must also
	 * increment scrubs_paused to prevent deadlocking on pause
	 * requests used for transaction commits (as the worker uses a
	 * transaction context). it is safe to regard the worker
	 * as paused for all practical matters. effectively, we only
	 * prevent cancellation requests from completing.
	 */
	mutex_lock(&fs_info->scrub_lock);
	atomic_inc(&fs_info->scrubs_running);
	atomic_inc(&fs_info->scrubs_paused);
	mutex_unlock(&fs_info->scrub_lock);

	/*
	 * The check of @scrubs_running == @scrubs_paused inside wait_event()
	 * is not an atomic operation, which means @scrubs_running/paused may
	 * be inc'ed/dec'ed at any time. Wake up @scrub_pause_wait as often as
	 * we can so that a blocked transaction commit waits as little as
	 * possible.
	 */
	wake_up(&fs_info->scrub_pause_wait);

	atomic_inc(&sctx->workers_pending);
}

/* used for workers that require transaction commits */
static void scrub_pending_trans_workers_dec(struct scrub_ctx *sctx)
{
	struct btrfs_fs_info *fs_info = sctx->fs_info;

	/*
	 * see scrub_pending_trans_workers_inc() why we're pretending
	 * to be paused in the scrub counters
	 */
	mutex_lock(&fs_info->scrub_lock);
	atomic_dec(&fs_info->scrubs_running);
	atomic_dec(&fs_info->scrubs_paused);
	mutex_unlock(&fs_info->scrub_lock);
	atomic_dec(&sctx->workers_pending);
	wake_up(&fs_info->scrub_pause_wait);
	wake_up(&sctx->list_wait);
	scrub_put_ctx(sctx);
}

static void scrub_free_csums(struct scrub_ctx *sctx)
{
	while (!list_empty(&sctx->csum_list)) {
		struct btrfs_ordered_sum *sum;
		sum = list_first_entry(&sctx->csum_list,
				       struct btrfs_ordered_sum, list);
		list_del(&sum->list);
		kfree(sum);
	}
}

static noinline_for_stack void scrub_free_ctx(struct scrub_ctx *sctx)
{
	int i;

	if (!sctx)
		return;

	mutex_lock(&sctx->wr_ctx.wr_lock);
	kfree(sctx->wr_ctx.wr_curr_bio);
	sctx->wr_ctx.wr_curr_bio = NULL;
	mutex_unlock(&sctx->wr_ctx.wr_lock);

	/* this can happen when scrub is cancelled */
	if (sctx->curr != -1) {
		struct scrub_bio *sbio = sctx->bios[sctx->curr];

		for (i = 0; i < sbio->page_count; i++) {
			WARN_ON(!sbio->pagev[i]->page);
			scrub_block_put(sbio->pagev[i]->sblock);
		}
		bio_put(sbio->bio);
	}

	for (i = 0; i < SCRUB_BIOS_PER_SCTX; ++i) {
		struct scrub_bio *sbio = sctx->bios[i];

		if (!sbio)
			break;
		kfree(sbio);
	}

	scrub_free_csums(sctx);
	kfree(sctx);
}

static void scrub_put_ctx(struct scrub_ctx *sctx)
{
	if (refcount_dec_and_test(&sctx->refs))
		scrub_free_ctx(sctx);
}

static noinline_for_stack
struct scrub_ctx *scrub_setup_ctx(struct btrfs_device *dev, int is_dev_replace)
{
	struct scrub_ctx *sctx;
	int i;
	struct btrfs_fs_info *fs_info = dev->fs_info;

	sctx = kzalloc(sizeof(*sctx), GFP_KERNEL);
	if (!sctx)
		goto nomem;
	refcount_set(&sctx->refs, 1);
	sctx->is_dev_replace = is_dev_replace;
	sctx->pages_per_rd_bio = SCRUB_PAGES_PER_RD_BIO;
	sctx->curr = -1;
	sctx->fs_info = dev->fs_info;
	for (i = 0; i < SCRUB_BIOS_PER_SCTX; ++i) {
		struct scrub_bio *sbio;

		sbio = kzalloc(sizeof(*sbio), GFP_KERNEL);
		if (!sbio)
			goto nomem;
		sctx->bios[i] = sbio;

		sbio->index = i;
		sbio->sctx = sctx;
		sbio->page_count = 0;
		btrfs_init_work(&sbio->work, btrfs_scrub_helper,
				scrub_bio_end_io_worker, NULL, NULL);

		if (i != SCRUB_BIOS_PER_SCTX - 1)
			sctx->bios[i]->next_free = i + 1;
		else
			sctx->bios[i]->next_free = -1;
	}
	sctx->first_free = 0;
	sctx->nodesize = fs_info->nodesize;
	sctx->sectorsize = fs_info->sectorsize;
	atomic_set(&sctx->bios_in_flight, 0);
	atomic_set(&sctx->workers_pending, 0);
	atomic_set(&sctx->cancel_req, 0);
	sctx->csum_size = btrfs_super_csum_size(fs_info->super_copy);
	INIT_LIST_HEAD(&sctx->csum_list);

	spin_lock_init(&sctx->list_lock);
	spin_lock_init(&sctx->stat_lock);
	init_waitqueue_head(&sctx->list_wait);

	WARN_ON(sctx->wr_ctx.wr_curr_bio != NULL);
	mutex_init(&sctx->wr_ctx.wr_lock);
	sctx->wr_ctx.wr_curr_bio = NULL;
	if (is_dev_replace) {
		WARN_ON(!dev->bdev);
		sctx->wr_ctx.pages_per_wr_bio = SCRUB_PAGES_PER_WR_BIO;
		sctx->wr_ctx.tgtdev = dev;
		atomic_set(&sctx->wr_ctx.flush_all_writes, 0);
	}

	return sctx;

nomem:
	scrub_free_ctx(sctx);
	return ERR_PTR(-ENOMEM);
}

static int scrub_print_warning_inode(u64 inum, u64 offset, u64 root,
				     void *warn_ctx)
{
	u64 isize;
	u32 nlink;
	int ret;
	int i;
	struct extent_buffer *eb;
	struct btrfs_inode_item *inode_item;
	struct scrub_warning *swarn = warn_ctx;
	struct btrfs_fs_info *fs_info = swarn->dev->fs_info;
	struct inode_fs_paths *ipath = NULL;
	struct btrfs_root *local_root;
	struct btrfs_key root_key;
	struct btrfs_key key;

	root_key.objectid = root;
	root_key.type = BTRFS_ROOT_ITEM_KEY;
	root_key.offset = (u64)-1;
	local_root = btrfs_read_fs_root_no_name(fs_info, &root_key);
	if (IS_ERR(local_root)) {
		ret = PTR_ERR(local_root);
		goto err;
	}

	/*
	 * this makes the path point to (inum INODE_ITEM ioff)
	 */
	key.objectid = inum;
	key.type = BTRFS_INODE_ITEM_KEY;
	key.offset = 0;

	ret = btrfs_search_slot(NULL, local_root, &key, swarn->path, 0, 0);
	if (ret) {
		btrfs_release_path(swarn->path);
		goto err;
	}

	eb = swarn->path->nodes[0];
	inode_item = btrfs_item_ptr(eb, swarn->path->slots[0],
					struct btrfs_inode_item);
	isize = btrfs_inode_size(eb, inode_item);
	nlink = btrfs_inode_nlink(eb, inode_item);
	btrfs_release_path(swarn->path);

	ipath = init_ipath(4096, local_root, swarn->path);
	if (IS_ERR(ipath)) {
		ret = PTR_ERR(ipath);
		ipath = NULL;
		goto err;
	}
	ret = paths_from_inode(inum, ipath);

	if (ret < 0)
		goto err;

	/*
	 * we deliberately ignore the fact that ipath might have been too
	 * small to hold all of the paths here
	 */
	for (i = 0; i < ipath->fspath->elem_cnt; ++i)
		btrfs_warn_in_rcu(fs_info,
				  "%s at logical %llu on dev %s, sector %llu, root %llu, inode %llu, offset %llu, length %llu, links %u (path: %s)",
				  swarn->errstr, swarn->logical,
				  rcu_str_deref(swarn->dev->name),
				  (unsigned long long)swarn->sector,
				  root, inum, offset,
				  min(isize - offset, (u64)PAGE_SIZE), nlink,
				  (char *)(unsigned long)ipath->fspath->val[i]);

	free_ipath(ipath);
	return 0;

err:
	btrfs_warn_in_rcu(fs_info,
			  "%s at logical %llu on dev %s, sector %llu, root %llu, inode %llu, offset %llu: path resolving failed with ret=%d",
			  swarn->errstr, swarn->logical,
			  rcu_str_deref(swarn->dev->name),
			  (unsigned long long)swarn->sector,
			  root, inum, offset, ret);

	free_ipath(ipath);
	return 0;
}

static void scrub_print_warning(const char *errstr, struct scrub_block *sblock)
{
	struct btrfs_device *dev;
	struct btrfs_fs_info *fs_info;
	struct btrfs_path *path;
	struct btrfs_key found_key;
	struct extent_buffer *eb;
	struct btrfs_extent_item *ei;
	struct scrub_warning swarn;
	unsigned long ptr = 0;
	u64 extent_item_pos;
	u64 flags = 0;
	u64 ref_root;
	u32 item_size;
	u8 ref_level = 0;
	int ret;

	WARN_ON(sblock->page_count < 1);
	dev = sblock->pagev[0]->dev;
	fs_info = sblock->sctx->fs_info;

	path = btrfs_alloc_path();
	if (!path)
		return;

	swarn.sector = (sblock->pagev[0]->physical) >> 9;
	swarn.logical = sblock->pagev[0]->logical;
	swarn.errstr = errstr;
	swarn.dev = NULL;

	ret = extent_from_logical(fs_info, swarn.logical, path, &found_key,
				  &flags);
	if (ret < 0)
		goto out;

	extent_item_pos = swarn.logical - found_key.objectid;
	swarn.extent_item_size = found_key.offset;

	eb = path->nodes[0];
	ei = btrfs_item_ptr(eb, path->slots[0], struct btrfs_extent_item);
	item_size = btrfs_item_size_nr(eb, path->slots[0]);

	if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
		do {
			ret = tree_backref_for_extent(&ptr, eb, &found_key, ei,
						      item_size, &ref_root,
						      &ref_level);
			btrfs_warn_in_rcu(fs_info,
				"%s at logical %llu on dev %s, sector %llu: metadata %s (level %d) in tree %llu",
				errstr, swarn.logical,
				rcu_str_deref(dev->name),
				(unsigned long long)swarn.sector,
				ref_level ? "node" : "leaf",
				ret < 0 ? -1 : ref_level,
				ret < 0 ? -1 : ref_root);
		} while (ret != 1);
		btrfs_release_path(path);
	} else {
		btrfs_release_path(path);
		swarn.path = path;
		swarn.dev = dev;
		iterate_extent_inodes(fs_info, found_key.objectid,
				      extent_item_pos, 1,
				      scrub_print_warning_inode, &swarn);
	}

out:
	btrfs_free_path(path);
}

static int scrub_fixup_readpage(u64 inum, u64 offset, u64 root, void *fixup_ctx)
{
	struct page *page = NULL;
	unsigned long index;
	struct scrub_fixup_nodatasum *fixup = fixup_ctx;
	int ret;
	int corrected = 0;
	struct btrfs_key key;
	struct inode *inode = NULL;
	struct btrfs_fs_info *fs_info;
	u64 end = offset + PAGE_SIZE - 1;
	struct btrfs_root *local_root;
	int srcu_index;

	key.objectid = root;
	key.type = BTRFS_ROOT_ITEM_KEY;
	key.offset = (u64)-1;

	fs_info = fixup->root->fs_info;
	srcu_index = srcu_read_lock(&fs_info->subvol_srcu);

	local_root = btrfs_read_fs_root_no_name(fs_info, &key);
	if (IS_ERR(local_root)) {
		srcu_read_unlock(&fs_info->subvol_srcu, srcu_index);
		return PTR_ERR(local_root);
	}

	key.type = BTRFS_INODE_ITEM_KEY;
	key.objectid = inum;
	key.offset = 0;
	inode = btrfs_iget(fs_info->sb, &key, local_root, NULL);
	srcu_read_unlock(&fs_info->subvol_srcu, srcu_index);
	if (IS_ERR(inode))
		return PTR_ERR(inode);

	index = offset >> PAGE_SHIFT;

	page = find_or_create_page(inode->i_mapping, index, GFP_NOFS);
	if (!page) {
		ret = -ENOMEM;
		goto out;
	}

	if (PageUptodate(page)) {
		if (PageDirty(page)) {
			/*
			 * we need to write the data to the defect sector. the
			 * data that was in that sector is not in memory,
			 * because the page was modified. we must not write the
			 * modified page to that sector.
			 *
			 * TODO: what could be done here: wait for the delalloc
			 *       runner to write out that page (might involve
			 *       COW) and see whether the sector is still
			 *       referenced afterwards.
			 *
			 * For the meantime, we'll treat this error as
			 * uncorrectable, although there is a chance that a
			 * later scrub will find the bad sector again and that
			 * there's no dirty page in memory, then.
			 */
			ret = -EIO;
			goto out;
		}
		ret = repair_io_failure(fs_info, inum, offset, PAGE_SIZE,
					fixup->logical, page,
					offset - page_offset(page),
					fixup->mirror_num);
		unlock_page(page);
		corrected = !ret;
	} else {
		/*
		 * we need to get good data first. the general readpage path
		 * will call repair_io_failure for us, we just have to make
		 * sure we read the bad mirror.
		 */
		ret = set_extent_bits(&BTRFS_I(inode)->io_tree, offset, end,
				      EXTENT_DAMAGED);
		if (ret) {
			/* set_extent_bits should give proper error */
			WARN_ON(ret > 0);
			if (ret > 0)
				ret = -EFAULT;
			goto out;
		}

		ret = extent_read_full_page(&BTRFS_I(inode)->io_tree, page,
						btrfs_get_extent,
						fixup->mirror_num);
		wait_on_page_locked(page);

		corrected = !test_range_bit(&BTRFS_I(inode)->io_tree, offset,
						end, EXTENT_DAMAGED, 0, NULL);
		if (!corrected)
			clear_extent_bits(&BTRFS_I(inode)->io_tree, offset, end,
						EXTENT_DAMAGED);
	}

out:
	if (page)
		put_page(page);

	iput(inode);

	if (ret < 0)
		return ret;

	if (ret == 0 && corrected) {
		/*
		 * we only need to call readpage for one of the inodes belonging
		 * to this extent. so make iterate_extent_inodes stop
		 */
		return 1;
	}

	return -EIO;
}

static void scrub_fixup_nodatasum(struct btrfs_work *work)
{
	struct btrfs_fs_info *fs_info;
	int ret;
	struct scrub_fixup_nodatasum *fixup;
	struct scrub_ctx *sctx;
	struct btrfs_trans_handle *trans = NULL;
	struct btrfs_path *path;
	int uncorrectable = 0;

	fixup = container_of(work, struct scrub_fixup_nodatasum, work);
	sctx = fixup->sctx;
	fs_info = fixup->root->fs_info;

	path = btrfs_alloc_path();
	if (!path) {
		spin_lock(&sctx->stat_lock);
		++sctx->stat.malloc_errors;
		spin_unlock(&sctx->stat_lock);
		uncorrectable = 1;
		goto out;
	}

	trans = btrfs_join_transaction(fixup->root);
	if (IS_ERR(trans)) {
		uncorrectable = 1;
		goto out;
	}

	/*
	 * the idea is to trigger a regular read through the standard path. we
	 * read a page from the (failed) logical address by specifying the
	 * corresponding copynum of the failed sector. thus, that readpage is
	 * expected to fail.
	 * that is the point where on-the-fly error correction will kick in
	 * (once it's finished) and rewrite the failed sector if a good copy
	 * can be found.
	 */
	ret = iterate_inodes_from_logical(fixup->logical, fs_info, path,
					  scrub_fixup_readpage, fixup);
	if (ret < 0) {
		uncorrectable = 1;
		goto out;
	}
	WARN_ON(ret != 1);

	spin_lock(&sctx->stat_lock);
	++sctx->stat.corrected_errors;
	spin_unlock(&sctx->stat_lock);

out:
	if (trans && !IS_ERR(trans))
		btrfs_end_transaction(trans);
	if (uncorrectable) {
		spin_lock(&sctx->stat_lock);
		++sctx->stat.uncorrectable_errors;
		spin_unlock(&sctx->stat_lock);
		btrfs_dev_replace_stats_inc(
			&fs_info->dev_replace.num_uncorrectable_read_errors);
		btrfs_err_rl_in_rcu(fs_info,
			"unable to fixup (nodatasum) error at logical %llu on dev %s",
			fixup->logical, rcu_str_deref(fixup->dev->name));
	}

	btrfs_free_path(path);
	kfree(fixup);

	scrub_pending_trans_workers_dec(sctx);
}

static inline void scrub_get_recover(struct scrub_recover *recover)
{
	refcount_inc(&recover->refs);
}

static inline void scrub_put_recover(struct btrfs_fs_info *fs_info,
				     struct scrub_recover *recover)
{
	if (refcount_dec_and_test(&recover->refs)) {
		btrfs_bio_counter_dec(fs_info);
		btrfs_put_bbio(recover->bbio);
		kfree(recover);
	}
}

Arne Jansena2de7332011-03-08 14:14:00 +01001098/*
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001099 * scrub_handle_errored_block gets called when either verification of the
1100 * pages failed or the bio failed to read, e.g. with EIO. In the latter
1101 * case, this function handles all pages in the bio, even though only one
1102 * may be bad.
1103 * The goal of this function is to repair the errored block by using the
1104 * contents of one of the mirrors.
Arne Jansena2de7332011-03-08 14:14:00 +01001105 */
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001106static int scrub_handle_errored_block(struct scrub_block *sblock_to_check)
Arne Jansena2de7332011-03-08 14:14:00 +01001107{
Stefan Behrensd9d181c2012-11-02 09:58:09 +01001108 struct scrub_ctx *sctx = sblock_to_check->sctx;
Stefan Behrensa36cf8b2012-11-02 13:26:57 +01001109 struct btrfs_device *dev;
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001110 struct btrfs_fs_info *fs_info;
Arne Jansena2de7332011-03-08 14:14:00 +01001111 u64 length;
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001112 u64 logical;
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001113 unsigned int failed_mirror_index;
1114 unsigned int is_metadata;
1115 unsigned int have_csum;
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001116 struct scrub_block *sblocks_for_recheck; /* holds one for each mirror */
1117 struct scrub_block *sblock_bad;
Arne Jansena2de7332011-03-08 14:14:00 +01001118 int ret;
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001119 int mirror_index;
1120 int page_num;
1121 int success;
Qu Wenruo28d70e22017-04-14 08:35:55 +08001122 bool full_stripe_locked;
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001123 static DEFINE_RATELIMIT_STATE(_rs, DEFAULT_RATELIMIT_INTERVAL,
1124 DEFAULT_RATELIMIT_BURST);
Arne Jansena2de7332011-03-08 14:14:00 +01001125
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001126 BUG_ON(sblock_to_check->page_count < 1);
Jeff Mahoneyfb456252016-06-22 18:54:56 -04001127 fs_info = sctx->fs_info;
Stefan Behrens4ded4f62012-11-14 18:57:29 +00001128 if (sblock_to_check->pagev[0]->flags & BTRFS_EXTENT_FLAG_SUPER) {
1129 /*
1130 * if we find an error in a super block, we just report it.
1131 * They will get written with the next transaction commit
1132 * anyway
1133 */
1134 spin_lock(&sctx->stat_lock);
1135 ++sctx->stat.super_errors;
1136 spin_unlock(&sctx->stat_lock);
1137 return 0;
1138 }
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001139 length = sblock_to_check->page_count * PAGE_SIZE;
Stefan Behrens7a9e9982012-11-02 14:58:04 +01001140 logical = sblock_to_check->pagev[0]->logical;
Stefan Behrens7a9e9982012-11-02 14:58:04 +01001141 BUG_ON(sblock_to_check->pagev[0]->mirror_num < 1);
1142 failed_mirror_index = sblock_to_check->pagev[0]->mirror_num - 1;
1143 is_metadata = !(sblock_to_check->pagev[0]->flags &
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001144 BTRFS_EXTENT_FLAG_DATA);
Stefan Behrens7a9e9982012-11-02 14:58:04 +01001145 have_csum = sblock_to_check->pagev[0]->have_csum;
Stefan Behrens7a9e9982012-11-02 14:58:04 +01001146 dev = sblock_to_check->pagev[0]->dev;
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001147
Qu Wenruo28d70e22017-04-14 08:35:55 +08001148 /*
1149 * For RAID5/6, race can happen for a different device scrub thread.
1150 * For data corruption, Parity and Data threads will both try
1151 * to recovery the data.
1152 * Race can lead to doubly added csum error, or even unrecoverable
1153 * error.
1154 */
1155 ret = lock_full_stripe(fs_info, logical, &full_stripe_locked);
1156 if (ret < 0) {
1157 spin_lock(&sctx->stat_lock);
1158 if (ret == -ENOMEM)
1159 sctx->stat.malloc_errors++;
1160 sctx->stat.read_errors++;
1161 sctx->stat.uncorrectable_errors++;
1162 spin_unlock(&sctx->stat_lock);
1163 return ret;
1164 }
1165
Stefan Behrensff023aa2012-11-06 11:43:11 +01001166 if (sctx->is_dev_replace && !is_metadata && !have_csum) {
1167 sblocks_for_recheck = NULL;
1168 goto nodatasum_case;
1169 }
1170
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001171 /*
1172 * read all mirrors one after the other. This includes to
1173 * re-read the extent or metadata block that failed (that was
1174 * the cause that this fixup code is called) another time,
1175 * page by page this time in order to know which pages
1176 * caused I/O errors and which ones are good (for all mirrors).
1177 * It is the goal to handle the situation when more than one
1178 * mirror contains I/O errors, but the errors do not
1179 * overlap, i.e. the data can be repaired by selecting the
1180 * pages from those mirrors without I/O error on the
1181 * particular pages. One example (with blocks >= 2 * PAGE_SIZE)
1182 * would be that mirror #1 has an I/O error on the first page,
1183 * the second page is good, and mirror #2 has an I/O error on
1184 * the second page, but the first page is good.
1185 * Then the first page of the first mirror can be repaired by
1186 * taking the first page of the second mirror, and the
1187 * second page of the second mirror can be repaired by
1188 * copying the contents of the 2nd page of the 1st mirror.
1189 * One more note: if the pages of one mirror contain I/O
1190 * errors, the checksum cannot be verified. In order to get
1191 * the best data for repairing, the first attempt is to find
1192 * a mirror without I/O errors and with a validated checksum.
1193 * Only if this is not possible, the pages are picked from
1194 * mirrors with I/O errors without considering the checksum.
1195 * If the latter is the case, at the end, the checksum of the
1196 * repaired area is verified in order to correctly maintain
1197 * the statistics.
1198 */
1199
David Sterba31e818f2015-02-20 18:00:26 +01001200 sblocks_for_recheck = kcalloc(BTRFS_MAX_MIRRORS,
1201 sizeof(*sblocks_for_recheck), GFP_NOFS);
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001202 if (!sblocks_for_recheck) {
Stefan Behrensd9d181c2012-11-02 09:58:09 +01001203 spin_lock(&sctx->stat_lock);
1204 sctx->stat.malloc_errors++;
1205 sctx->stat.read_errors++;
1206 sctx->stat.uncorrectable_errors++;
1207 spin_unlock(&sctx->stat_lock);
Stefan Behrensa36cf8b2012-11-02 13:26:57 +01001208 btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_READ_ERRS);
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001209 goto out;
1210 }
1211
1212 /* setup the context, map the logical blocks and alloc the pages */
Zhao Leibe50a8d2015-01-20 15:11:42 +08001213 ret = scrub_setup_recheck_block(sblock_to_check, sblocks_for_recheck);
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001214 if (ret) {
Stefan Behrensd9d181c2012-11-02 09:58:09 +01001215 spin_lock(&sctx->stat_lock);
1216 sctx->stat.read_errors++;
1217 sctx->stat.uncorrectable_errors++;
1218 spin_unlock(&sctx->stat_lock);
Stefan Behrensa36cf8b2012-11-02 13:26:57 +01001219 btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_READ_ERRS);
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001220 goto out;
1221 }
1222 BUG_ON(failed_mirror_index >= BTRFS_MAX_MIRRORS);
1223 sblock_bad = sblocks_for_recheck + failed_mirror_index;
1224
1225 /* build and submit the bios for the failed mirror, check checksums */
Zhao Leiaffe4a52015-08-24 21:32:06 +08001226 scrub_recheck_block(fs_info, sblock_bad, 1);
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001227
1228 if (!sblock_bad->header_error && !sblock_bad->checksum_error &&
1229 sblock_bad->no_io_error_seen) {
1230 /*
1231 * the error disappeared after reading page by page, or
1232 * the area was part of a huge bio and other parts of the
1233 * bio caused I/O errors, or the block layer merged several
1234 * read requests into one and the error is caused by a
1235 * different bio (usually one of the two latter cases is
1236 * the cause)
1237 */
Stefan Behrensd9d181c2012-11-02 09:58:09 +01001238 spin_lock(&sctx->stat_lock);
1239 sctx->stat.unverified_errors++;
Miao Xie5a6ac9e2014-11-06 17:20:58 +08001240 sblock_to_check->data_corrected = 1;
Stefan Behrensd9d181c2012-11-02 09:58:09 +01001241 spin_unlock(&sctx->stat_lock);
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001242
Stefan Behrensff023aa2012-11-06 11:43:11 +01001243 if (sctx->is_dev_replace)
1244 scrub_write_block_to_dev_replace(sblock_bad);
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001245 goto out;
1246 }
1247
1248 if (!sblock_bad->no_io_error_seen) {
Stefan Behrensd9d181c2012-11-02 09:58:09 +01001249 spin_lock(&sctx->stat_lock);
1250 sctx->stat.read_errors++;
1251 spin_unlock(&sctx->stat_lock);
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001252 if (__ratelimit(&_rs))
1253 scrub_print_warning("i/o error", sblock_to_check);
Stefan Behrensa36cf8b2012-11-02 13:26:57 +01001254 btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_READ_ERRS);
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001255 } else if (sblock_bad->checksum_error) {
Stefan Behrensd9d181c2012-11-02 09:58:09 +01001256 spin_lock(&sctx->stat_lock);
1257 sctx->stat.csum_errors++;
1258 spin_unlock(&sctx->stat_lock);
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001259 if (__ratelimit(&_rs))
1260 scrub_print_warning("checksum error", sblock_to_check);
Stefan Behrensa36cf8b2012-11-02 13:26:57 +01001261 btrfs_dev_stat_inc_and_print(dev,
Stefan Behrens442a4f62012-05-25 16:06:08 +02001262 BTRFS_DEV_STAT_CORRUPTION_ERRS);
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001263 } else if (sblock_bad->header_error) {
Stefan Behrensd9d181c2012-11-02 09:58:09 +01001264 spin_lock(&sctx->stat_lock);
1265 sctx->stat.verify_errors++;
1266 spin_unlock(&sctx->stat_lock);
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001267 if (__ratelimit(&_rs))
1268 scrub_print_warning("checksum/header error",
1269 sblock_to_check);
Stefan Behrens442a4f62012-05-25 16:06:08 +02001270 if (sblock_bad->generation_error)
Stefan Behrensa36cf8b2012-11-02 13:26:57 +01001271 btrfs_dev_stat_inc_and_print(dev,
Stefan Behrens442a4f62012-05-25 16:06:08 +02001272 BTRFS_DEV_STAT_GENERATION_ERRS);
1273 else
Stefan Behrensa36cf8b2012-11-02 13:26:57 +01001274 btrfs_dev_stat_inc_and_print(dev,
Stefan Behrens442a4f62012-05-25 16:06:08 +02001275 BTRFS_DEV_STAT_CORRUPTION_ERRS);
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001276 }
1277
Ilya Dryomov33ef30a2013-11-03 19:06:38 +02001278 if (sctx->readonly) {
1279 ASSERT(!sctx->is_dev_replace);
1280 goto out;
1281 }
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001282
1283 if (!is_metadata && !have_csum) {
1284 struct scrub_fixup_nodatasum *fixup_nodatasum;
1285
Stefan Behrensff023aa2012-11-06 11:43:11 +01001286 WARN_ON(sctx->is_dev_replace);
1287
Zhao Leib25c94c2015-01-20 15:11:35 +08001288nodatasum_case:
1289
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001290 /*
1291 * !is_metadata and !have_csum: this means that the data
Nicholas D Steeves01327612016-05-19 21:18:45 -04001292 * might not be COWed and might be modified
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001293 * concurrently. The general strategy of working on the
1294 * commit root does not help when COW is not
1295 * used.
1296 */
1297 fixup_nodatasum = kzalloc(sizeof(*fixup_nodatasum), GFP_NOFS);
1298 if (!fixup_nodatasum)
1299 goto did_not_correct_error;
Stefan Behrensd9d181c2012-11-02 09:58:09 +01001300 fixup_nodatasum->sctx = sctx;
Stefan Behrensa36cf8b2012-11-02 13:26:57 +01001301 fixup_nodatasum->dev = dev;
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001302 fixup_nodatasum->logical = logical;
1303 fixup_nodatasum->root = fs_info->extent_root;
1304 fixup_nodatasum->mirror_num = failed_mirror_index + 1;
Stefan Behrensb6bfebc2012-11-02 16:44:58 +01001305 scrub_pending_trans_workers_inc(sctx);
Liu Bo9e0af232014-08-15 23:36:53 +08001306 btrfs_init_work(&fixup_nodatasum->work, btrfs_scrub_helper,
1307 scrub_fixup_nodatasum, NULL, NULL);
Qu Wenruo0339ef22014-02-28 10:46:17 +08001308 btrfs_queue_work(fs_info->scrub_workers,
1309 &fixup_nodatasum->work);
Arne Jansena2de7332011-03-08 14:14:00 +01001310 goto out;
1311 }
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001312
1313 /*
1314 * Now build and submit the bios for the other mirrors and check
Stefan Behrenscb2ced72012-11-02 16:14:21 +01001315 * checksums.
1316 * First try to pick the mirror which is completely without I/O
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001317 * errors and also does not have a checksum error.
1318 * If one is found, and if a checksum is present, the full block
1319 * that is known to contain an error is rewritten. Afterwards
1320 * the block is known to be corrected.
1321 * If a mirror is found which is completely correct, and no
1322 * checksum is present, only those pages are rewritten that had
1323 * an I/O error in the block to be repaired, since it cannot be
1324 * determined which copy of the other pages is better (and
1325 * otherwise a correct page could be
1326 * overwritten by a bad one).
1327 */
1328 for (mirror_index = 0;
1329 mirror_index < BTRFS_MAX_MIRRORS &&
1330 sblocks_for_recheck[mirror_index].page_count > 0;
1331 mirror_index++) {
Stefan Behrenscb2ced72012-11-02 16:14:21 +01001332 struct scrub_block *sblock_other;
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001333
Stefan Behrenscb2ced72012-11-02 16:14:21 +01001334 if (mirror_index == failed_mirror_index)
1335 continue;
1336 sblock_other = sblocks_for_recheck + mirror_index;
1337
1338 /* build and submit the bios, check checksums */
Zhao Leiaffe4a52015-08-24 21:32:06 +08001339 scrub_recheck_block(fs_info, sblock_other, 0);
Stefan Behrens34f5c8e2012-11-02 16:16:26 +01001340
1341 if (!sblock_other->header_error &&
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001342 !sblock_other->checksum_error &&
1343 sblock_other->no_io_error_seen) {
Stefan Behrensff023aa2012-11-06 11:43:11 +01001344 if (sctx->is_dev_replace) {
1345 scrub_write_block_to_dev_replace(sblock_other);
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001346 goto corrected_error;
Zhao Lei114ab502015-01-20 15:11:36 +08001347 } else {
1348 ret = scrub_repair_block_from_good_copy(
1349 sblock_bad, sblock_other);
1350 if (!ret)
1351 goto corrected_error;
1352 }
Arne Jansena2de7332011-03-08 14:14:00 +01001353 }
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001354 }
1355
Zhao Leib968fed2015-01-20 15:11:41 +08001356 if (sblock_bad->no_io_error_seen && !sctx->is_dev_replace)
1357 goto did_not_correct_error;
Stefan Behrensff023aa2012-11-06 11:43:11 +01001358
1359 /*
Stefan Behrensff023aa2012-11-06 11:43:11 +01001360 * In case of I/O errors in the area that is supposed to be
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001361 * repaired, continue by picking good copies of those pages.
1362 * Select the good pages from mirrors to rewrite bad pages from
1363 * the area to fix. Afterwards verify the checksum of the block
1364 * that is supposed to be repaired. This verification step is
1365 * only done for the purpose of statistics counting and for the
1366 * final scrub report on whether errors remain.
1367 * A perfect algorithm could make use of the checksum and try
1368 * all possible combinations of pages from the different mirrors
1369 * until the checksum verification succeeds. For example, when
1370 * the 2nd page of mirror #1 faces I/O errors, and the 2nd page
1371 * of mirror #2 is readable but the final checksum test fails,
1372 * then the 2nd page of mirror #3 could be tried to see whether
Nicholas D Steeves01327612016-05-19 21:18:45 -04001373 * the final checksum then succeeds. But this would be a rare
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001374 * exception and is therefore not implemented. At least
1375 * overwriting the good copy is avoided.
1376 * A more useful improvement would be to pick the sectors
1377 * without I/O error based on sector sizes (512 bytes on legacy
1378 * disks) instead of on PAGE_SIZE. Then maybe 512 bytes of one
1379 * mirror could be repaired by taking 512 bytes from a different
1380 * mirror, even if other 512 byte sectors in the same PAGE_SIZE
1381 * area are unreadable.
1382 */
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001383 success = 1;
Zhao Leib968fed2015-01-20 15:11:41 +08001384 for (page_num = 0; page_num < sblock_bad->page_count;
1385 page_num++) {
Stefan Behrens7a9e9982012-11-02 14:58:04 +01001386 struct scrub_page *page_bad = sblock_bad->pagev[page_num];
Zhao Leib968fed2015-01-20 15:11:41 +08001387 struct scrub_block *sblock_other = NULL;
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001388
Zhao Leib968fed2015-01-20 15:11:41 +08001389 /* skip no-io-error page in scrub */
1390 if (!page_bad->io_error && !sctx->is_dev_replace)
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001391 continue;
1392
Zhao Leib968fed2015-01-20 15:11:41 +08001393 /* try to find no-io-error page in mirrors */
1394 if (page_bad->io_error) {
1395 for (mirror_index = 0;
1396 mirror_index < BTRFS_MAX_MIRRORS &&
1397 sblocks_for_recheck[mirror_index].page_count > 0;
1398 mirror_index++) {
1399 if (!sblocks_for_recheck[mirror_index].
1400 pagev[page_num]->io_error) {
1401 sblock_other = sblocks_for_recheck +
1402 mirror_index;
1403 break;
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001404 }
Jan Schmidt13db62b2011-06-13 19:56:13 +02001405 }
Zhao Leib968fed2015-01-20 15:11:41 +08001406 if (!sblock_other)
1407 success = 0;
Jan Schmidt13db62b2011-06-13 19:56:13 +02001408 }
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001409
Zhao Leib968fed2015-01-20 15:11:41 +08001410 if (sctx->is_dev_replace) {
1411 /*
1412 * We did not find a mirror to fetch the page
1413 * from. scrub_write_page_to_dev_replace()
1414 * handles this case (page->io_error) by
1415 * filling the block with zeros before
1416 * submitting the write request.
1417 */
1418 if (!sblock_other)
1419 sblock_other = sblock_bad;
1420
1421 if (scrub_write_page_to_dev_replace(sblock_other,
1422 page_num) != 0) {
1423 btrfs_dev_replace_stats_inc(
Jeff Mahoney0b246af2016-06-22 18:54:23 -04001424 &fs_info->dev_replace.num_write_errors);
Zhao Leib968fed2015-01-20 15:11:41 +08001425 success = 0;
1426 }
1427 } else if (sblock_other) {
1428 ret = scrub_repair_page_from_good_copy(sblock_bad,
1429 sblock_other,
1430 page_num, 0);
1431 if (!ret)
1432 page_bad->io_error = 0;
1433 else
1434 success = 0;
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001435 }
1436 }
1437
Zhao Leib968fed2015-01-20 15:11:41 +08001438 if (success && !sctx->is_dev_replace) {
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001439 if (is_metadata || have_csum) {
1440 /*
1441 * We need to verify the checksum now that all
1442 * sectors on disk are repaired (the write
1443 * request for the data to be repaired is on its way).
1444 * Just be lazy and use scrub_recheck_block(),
1445 * which re-reads the data before the checksum
1446 * is verified; most likely the data comes out
1447 * of the page cache.
1448 */
Zhao Leiaffe4a52015-08-24 21:32:06 +08001449 scrub_recheck_block(fs_info, sblock_bad, 1);
Stefan Behrens34f5c8e2012-11-02 16:16:26 +01001450 if (!sblock_bad->header_error &&
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001451 !sblock_bad->checksum_error &&
1452 sblock_bad->no_io_error_seen)
1453 goto corrected_error;
1454 else
1455 goto did_not_correct_error;
1456 } else {
1457corrected_error:
Stefan Behrensd9d181c2012-11-02 09:58:09 +01001458 spin_lock(&sctx->stat_lock);
1459 sctx->stat.corrected_errors++;
Miao Xie5a6ac9e2014-11-06 17:20:58 +08001460 sblock_to_check->data_corrected = 1;
Stefan Behrensd9d181c2012-11-02 09:58:09 +01001461 spin_unlock(&sctx->stat_lock);
David Sterbab14af3b2015-10-08 10:43:10 +02001462 btrfs_err_rl_in_rcu(fs_info,
1463 "fixed up error at logical %llu on dev %s",
Geert Uytterhoevenc1c9ff72013-08-20 13:20:07 +02001464 logical, rcu_str_deref(dev->name));
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001465 }
1466 } else {
1467did_not_correct_error:
Stefan Behrensd9d181c2012-11-02 09:58:09 +01001468 spin_lock(&sctx->stat_lock);
1469 sctx->stat.uncorrectable_errors++;
1470 spin_unlock(&sctx->stat_lock);
David Sterbab14af3b2015-10-08 10:43:10 +02001471 btrfs_err_rl_in_rcu(fs_info,
1472 "unable to fixup (regular) error at logical %llu on dev %s",
Geert Uytterhoevenc1c9ff72013-08-20 13:20:07 +02001473 logical, rcu_str_deref(dev->name));
Arne Jansena2de7332011-03-08 14:14:00 +01001474 }
1475
1476out:
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001477 if (sblocks_for_recheck) {
1478 for (mirror_index = 0; mirror_index < BTRFS_MAX_MIRRORS;
1479 mirror_index++) {
1480 struct scrub_block *sblock = sblocks_for_recheck +
1481 mirror_index;
Miao Xieaf8e2d12014-10-23 14:42:50 +08001482 struct scrub_recover *recover;
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001483 int page_index;
1484
Stefan Behrens7a9e9982012-11-02 14:58:04 +01001485 for (page_index = 0; page_index < sblock->page_count;
1486 page_index++) {
1487 sblock->pagev[page_index]->sblock = NULL;
Miao Xieaf8e2d12014-10-23 14:42:50 +08001488 recover = sblock->pagev[page_index]->recover;
1489 if (recover) {
Qu Wenruoe501bfe2017-03-29 09:33:22 +08001490 scrub_put_recover(fs_info, recover);
Miao Xieaf8e2d12014-10-23 14:42:50 +08001491 sblock->pagev[page_index]->recover =
1492 NULL;
1493 }
Stefan Behrens7a9e9982012-11-02 14:58:04 +01001494 scrub_page_put(sblock->pagev[page_index]);
1495 }
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001496 }
1497 kfree(sblocks_for_recheck);
1498 }
1499
Qu Wenruo28d70e22017-04-14 08:35:55 +08001500 ret = unlock_full_stripe(fs_info, logical, full_stripe_locked);
1501 if (ret < 0)
1502 return ret;
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001503 return 0;
Arne Jansena2de7332011-03-08 14:14:00 +01001504}
1505
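/*
 * Number of distinct copies the recheck code can try for one block:
 * for RAID5 there are in effect two (the data stripe itself and a
 * rebuild from parity), for RAID6 three, and for mirrored profiles it
 * is simply the number of stripes returned by the mapping.
 */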
Zhao Lei8e5cfb52015-01-20 15:11:33 +08001506static inline int scrub_nr_raid_mirrors(struct btrfs_bio *bbio)
Miao Xieaf8e2d12014-10-23 14:42:50 +08001507{
Zhao Lei10f11902015-01-20 15:11:43 +08001508 if (bbio->map_type & BTRFS_BLOCK_GROUP_RAID5)
1509 return 2;
1510 else if (bbio->map_type & BTRFS_BLOCK_GROUP_RAID6)
1511 return 3;
1512 else
Miao Xieaf8e2d12014-10-23 14:42:50 +08001513 return (int)bbio->num_stripes;
Miao Xieaf8e2d12014-10-23 14:42:50 +08001514}
1515
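/*
 * Map a logical address back to the stripe that holds it. For RAID5/6,
 * raid_map[] records the logical start of every stripe, so walk it
 * until the address falls inside a data stripe (P/Q parity stripes are
 * skipped). For all other profiles each mirror is a full copy, so the
 * mirror number indexes the stripe directly and the offset is zero.
 *
 * A worked example with made-up numbers: on a three-device RAID5 with
 * 64K stripes, raid_map could be { 0, 65536, RAID5_P_STRIPE }; logical
 * address 70000 then yields stripe_index 1 and stripe_offset 4464.
 */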
Zhao Lei10f11902015-01-20 15:11:43 +08001516static inline void scrub_stripe_index_and_offset(u64 logical, u64 map_type,
1517 u64 *raid_map,
Miao Xieaf8e2d12014-10-23 14:42:50 +08001518 u64 mapped_length,
1519 int nstripes, int mirror,
1520 int *stripe_index,
1521 u64 *stripe_offset)
1522{
1523 int i;
1524
Zhao Leiffe2d202015-01-20 15:11:44 +08001525 if (map_type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
Miao Xieaf8e2d12014-10-23 14:42:50 +08001526 /* RAID5/6 */
1527 for (i = 0; i < nstripes; i++) {
1528 if (raid_map[i] == RAID6_Q_STRIPE ||
1529 raid_map[i] == RAID5_P_STRIPE)
1530 continue;
1531
1532 if (logical >= raid_map[i] &&
1533 logical < raid_map[i] + mapped_length)
1534 break;
1535 }
1536
1537 *stripe_index = i;
1538 *stripe_offset = logical - raid_map[i];
1539 } else {
1540 /* The other RAID type */
1541 *stripe_index = mirror;
1542 *stripe_offset = 0;
1543 }
1544}
1545
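/*
 * Build one scrub_block per mirror for the range covered by
 * original_sblock: map each PAGE_SIZE chunk of the logical range,
 * allocate a fresh page per mirror and record the physical location
 * that the recheck code will later read from. The scrub_recover object
 * keeps the btrfs_bio alive in case a RAID5/6 rebuild is needed later.
 */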
Zhao Leibe50a8d2015-01-20 15:11:42 +08001546static int scrub_setup_recheck_block(struct scrub_block *original_sblock,
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001547 struct scrub_block *sblocks_for_recheck)
Arne Jansena2de7332011-03-08 14:14:00 +01001548{
Zhao Leibe50a8d2015-01-20 15:11:42 +08001549 struct scrub_ctx *sctx = original_sblock->sctx;
Jeff Mahoneyfb456252016-06-22 18:54:56 -04001550 struct btrfs_fs_info *fs_info = sctx->fs_info;
Zhao Leibe50a8d2015-01-20 15:11:42 +08001551 u64 length = original_sblock->page_count * PAGE_SIZE;
1552 u64 logical = original_sblock->pagev[0]->logical;
Zhao Lei4734b7e2015-08-19 22:39:18 +08001553 u64 generation = original_sblock->pagev[0]->generation;
1554 u64 flags = original_sblock->pagev[0]->flags;
1555 u64 have_csum = original_sblock->pagev[0]->have_csum;
Miao Xieaf8e2d12014-10-23 14:42:50 +08001556 struct scrub_recover *recover;
1557 struct btrfs_bio *bbio;
Miao Xieaf8e2d12014-10-23 14:42:50 +08001558 u64 sublen;
1559 u64 mapped_length;
1560 u64 stripe_offset;
1561 int stripe_index;
Zhao Leibe50a8d2015-01-20 15:11:42 +08001562 int page_index = 0;
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001563 int mirror_index;
Miao Xieaf8e2d12014-10-23 14:42:50 +08001564 int nmirrors;
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001565 int ret;
1566
1567 /*
Zhao Lei57019342015-01-20 15:11:45 +08001568 * Note: the two members refs and outstanding_pages
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001569 * are not used (and not set) in the blocks that are used for
1570 * the recheck procedure.
1571 */
1572
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001573 while (length > 0) {
Miao Xieaf8e2d12014-10-23 14:42:50 +08001574 sublen = min_t(u64, length, PAGE_SIZE);
1575 mapped_length = sublen;
1576 bbio = NULL;
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001577
1578 /*
1579 * with a length of PAGE_SIZE, each returned stripe
1580 * represents one mirror
1581 */
Qu Wenruoe501bfe2017-03-29 09:33:22 +08001582 btrfs_bio_counter_inc_blocked(fs_info);
Christoph Hellwigcf8cddd2016-10-27 09:27:36 +02001583 ret = btrfs_map_sblock(fs_info, BTRFS_MAP_GET_READ_MIRRORS,
David Sterba825ad4c2017-03-28 14:45:22 +02001584 logical, &mapped_length, &bbio);
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001585 if (ret || !bbio || mapped_length < sublen) {
Zhao Lei6e9606d2015-01-20 15:11:34 +08001586 btrfs_put_bbio(bbio);
Qu Wenruoe501bfe2017-03-29 09:33:22 +08001587 btrfs_bio_counter_dec(fs_info);
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001588 return -EIO;
1589 }
1590
Miao Xieaf8e2d12014-10-23 14:42:50 +08001591 recover = kzalloc(sizeof(struct scrub_recover), GFP_NOFS);
1592 if (!recover) {
Zhao Lei6e9606d2015-01-20 15:11:34 +08001593 btrfs_put_bbio(bbio);
Qu Wenruoe501bfe2017-03-29 09:33:22 +08001594 btrfs_bio_counter_dec(fs_info);
Miao Xieaf8e2d12014-10-23 14:42:50 +08001595 return -ENOMEM;
1596 }
1597
Elena Reshetova6f615012017-03-03 10:55:21 +02001598 refcount_set(&recover->refs, 1);
Miao Xieaf8e2d12014-10-23 14:42:50 +08001599 recover->bbio = bbio;
Miao Xieaf8e2d12014-10-23 14:42:50 +08001600 recover->map_length = mapped_length;
1601
Ashish Samant24731142016-04-29 18:33:59 -07001602 BUG_ON(page_index >= SCRUB_MAX_PAGES_PER_BLOCK);
Miao Xieaf8e2d12014-10-23 14:42:50 +08001603
Zhao Leibe50a8d2015-01-20 15:11:42 +08001604 nmirrors = min(scrub_nr_raid_mirrors(bbio), BTRFS_MAX_MIRRORS);
Zhao Lei10f11902015-01-20 15:11:43 +08001605
Miao Xieaf8e2d12014-10-23 14:42:50 +08001606 for (mirror_index = 0; mirror_index < nmirrors;
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001607 mirror_index++) {
1608 struct scrub_block *sblock;
1609 struct scrub_page *page;
1610
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001611 sblock = sblocks_for_recheck + mirror_index;
Stefan Behrens7a9e9982012-11-02 14:58:04 +01001612 sblock->sctx = sctx;
Zhao Lei4734b7e2015-08-19 22:39:18 +08001613
Stefan Behrens7a9e9982012-11-02 14:58:04 +01001614 page = kzalloc(sizeof(*page), GFP_NOFS);
1615 if (!page) {
1616leave_nomem:
Stefan Behrensd9d181c2012-11-02 09:58:09 +01001617 spin_lock(&sctx->stat_lock);
1618 sctx->stat.malloc_errors++;
1619 spin_unlock(&sctx->stat_lock);
Qu Wenruoe501bfe2017-03-29 09:33:22 +08001620 scrub_put_recover(fs_info, recover);
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001621 return -ENOMEM;
1622 }
Stefan Behrens7a9e9982012-11-02 14:58:04 +01001623 scrub_page_get(page);
1624 sblock->pagev[page_index] = page;
Zhao Lei4734b7e2015-08-19 22:39:18 +08001625 page->sblock = sblock;
1626 page->flags = flags;
1627 page->generation = generation;
Stefan Behrens7a9e9982012-11-02 14:58:04 +01001628 page->logical = logical;
Zhao Lei4734b7e2015-08-19 22:39:18 +08001629 page->have_csum = have_csum;
1630 if (have_csum)
1631 memcpy(page->csum,
1632 original_sblock->pagev[0]->csum,
1633 sctx->csum_size);
Miao Xieaf8e2d12014-10-23 14:42:50 +08001634
Zhao Lei10f11902015-01-20 15:11:43 +08001635 scrub_stripe_index_and_offset(logical,
1636 bbio->map_type,
1637 bbio->raid_map,
Miao Xieaf8e2d12014-10-23 14:42:50 +08001638 mapped_length,
Zhao Leie34c3302015-01-20 15:11:31 +08001639 bbio->num_stripes -
1640 bbio->num_tgtdevs,
Miao Xieaf8e2d12014-10-23 14:42:50 +08001641 mirror_index,
1642 &stripe_index,
1643 &stripe_offset);
1644 page->physical = bbio->stripes[stripe_index].physical +
1645 stripe_offset;
1646 page->dev = bbio->stripes[stripe_index].dev;
1647
Stefan Behrensff023aa2012-11-06 11:43:11 +01001648 BUG_ON(page_index >= original_sblock->page_count);
1649 page->physical_for_dev_replace =
1650 original_sblock->pagev[page_index]->
1651 physical_for_dev_replace;
Stefan Behrens7a9e9982012-11-02 14:58:04 +01001652 /* for missing devices, dev->bdev is NULL */
Stefan Behrens7a9e9982012-11-02 14:58:04 +01001653 page->mirror_num = mirror_index + 1;
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001654 sblock->page_count++;
Stefan Behrens7a9e9982012-11-02 14:58:04 +01001655 page->page = alloc_page(GFP_NOFS);
1656 if (!page->page)
1657 goto leave_nomem;
Miao Xieaf8e2d12014-10-23 14:42:50 +08001658
1659 scrub_get_recover(recover);
1660 page->recover = recover;
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001661 }
Qu Wenruoe501bfe2017-03-29 09:33:22 +08001662 scrub_put_recover(fs_info, recover);
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001663 length -= sublen;
1664 logical += sublen;
1665 page_index++;
1666 }
1667
1668 return 0;
1669}
1670
Miao Xieaf8e2d12014-10-23 14:42:50 +08001671struct scrub_bio_ret {
1672 struct completion event;
1673 int error;
1674};
1675
Christoph Hellwig4246a0b2015-07-20 15:29:37 +02001676static void scrub_bio_wait_endio(struct bio *bio)
Miao Xieaf8e2d12014-10-23 14:42:50 +08001677{
1678 struct scrub_bio_ret *ret = bio->bi_private;
1679
Christoph Hellwig4246a0b2015-07-20 15:29:37 +02001680 ret->error = bio->bi_error;
Miao Xieaf8e2d12014-10-23 14:42:50 +08001681 complete(&ret->event);
1682}
1683
1684static inline int scrub_is_page_on_raid56(struct scrub_page *page)
1685{
Zhao Lei10f11902015-01-20 15:11:43 +08001686 return page->recover &&
Zhao Leiffe2d202015-01-20 15:11:44 +08001687 (page->recover->bbio->map_type & BTRFS_BLOCK_GROUP_RAID56_MASK);
Miao Xieaf8e2d12014-10-23 14:42:50 +08001688}
1689
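/*
 * Read one page through the RAID5/6 rebuild path and wait for the
 * result synchronously: scrub_bio_wait_endio() signals the completion
 * in scrub_bio_ret once raid56_parity_recover() has reconstructed the
 * data (or failed to do so).
 */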
1690static int scrub_submit_raid56_bio_wait(struct btrfs_fs_info *fs_info,
1691 struct bio *bio,
1692 struct scrub_page *page)
1693{
1694 struct scrub_bio_ret done;
1695 int ret;
1696
1697 init_completion(&done.event);
1698 done.error = 0;
1699 bio->bi_iter.bi_sector = page->logical >> 9;
1700 bio->bi_private = &done;
1701 bio->bi_end_io = scrub_bio_wait_endio;
1702
Jeff Mahoney2ff7e612016-06-22 18:54:24 -04001703 ret = raid56_parity_recover(fs_info, bio, page->recover->bbio,
Miao Xieaf8e2d12014-10-23 14:42:50 +08001704 page->recover->map_length,
Miao Xie42452152014-11-25 16:39:28 +08001705 page->mirror_num, 0);
Miao Xieaf8e2d12014-10-23 14:42:50 +08001706 if (ret)
1707 return ret;
1708
1709 wait_for_completion(&done.event);
1710 if (done.error)
1711 return -EIO;
1712
1713 return 0;
1714}
1715
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001716/*
1717 * This function checks the on-disk data for checksum errors, header
1718 * errors and read I/O errors. If any I/O errors happen, the exact pages
1719 * that had errors are marked as bad. The goal is to enable scrub
1720 * to take the pages that are free of errors from all the mirrors so that
1721 * the pages with errors in the just handled mirror can be repaired.
1722 */
Stefan Behrens34f5c8e2012-11-02 16:16:26 +01001723static void scrub_recheck_block(struct btrfs_fs_info *fs_info,
Zhao Leiaffe4a52015-08-24 21:32:06 +08001724 struct scrub_block *sblock,
1725 int retry_failed_mirror)
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001726{
1727 int page_num;
1728
1729 sblock->no_io_error_seen = 1;
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001730
1731 for (page_num = 0; page_num < sblock->page_count; page_num++) {
1732 struct bio *bio;
Stefan Behrens7a9e9982012-11-02 14:58:04 +01001733 struct scrub_page *page = sblock->pagev[page_num];
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001734
Stefan Behrens442a4f62012-05-25 16:06:08 +02001735 if (page->dev->bdev == NULL) {
Stefan Behrensea9947b2012-05-04 15:16:07 -04001736 page->io_error = 1;
1737 sblock->no_io_error_seen = 0;
1738 continue;
1739 }
1740
Stefan Behrens7a9e9982012-11-02 14:58:04 +01001741 WARN_ON(!page->page);
Chris Mason9be33952013-05-17 18:30:14 -04001742 bio = btrfs_io_bio_alloc(GFP_NOFS, 1);
Stefan Behrens34f5c8e2012-11-02 16:16:26 +01001743 if (!bio) {
1744 page->io_error = 1;
1745 sblock->no_io_error_seen = 0;
1746 continue;
1747 }
Stefan Behrens442a4f62012-05-25 16:06:08 +02001748 bio->bi_bdev = page->dev->bdev;
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001749
Stefan Behrens34f5c8e2012-11-02 16:16:26 +01001750 bio_add_page(bio, page->page, PAGE_SIZE, 0);
Miao Xieaf8e2d12014-10-23 14:42:50 +08001751 if (!retry_failed_mirror && scrub_is_page_on_raid56(page)) {
Liu Bo1bcd7aa2017-03-29 10:55:16 -07001752 if (scrub_submit_raid56_bio_wait(fs_info, bio, page)) {
1753 page->io_error = 1;
Miao Xieaf8e2d12014-10-23 14:42:50 +08001754 sblock->no_io_error_seen = 0;
Liu Bo1bcd7aa2017-03-29 10:55:16 -07001755 }
Miao Xieaf8e2d12014-10-23 14:42:50 +08001756 } else {
1757 bio->bi_iter.bi_sector = page->physical >> 9;
Mike Christie37226b22016-06-05 14:31:52 -05001758 bio_set_op_attrs(bio, REQ_OP_READ, 0);
Miao Xieaf8e2d12014-10-23 14:42:50 +08001759
Liu Bo1bcd7aa2017-03-29 10:55:16 -07001760 if (btrfsic_submit_bio_wait(bio)) {
1761 page->io_error = 1;
Miao Xieaf8e2d12014-10-23 14:42:50 +08001762 sblock->no_io_error_seen = 0;
Liu Bo1bcd7aa2017-03-29 10:55:16 -07001763 }
Miao Xieaf8e2d12014-10-23 14:42:50 +08001764 }
Kent Overstreet33879d42013-11-23 22:33:32 -08001765
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001766 bio_put(bio);
1767 }
1768
1769 if (sblock->no_io_error_seen)
Zhao Leiba7cf982015-08-24 21:18:02 +08001770 scrub_recheck_block_checksum(sblock);
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001771}
1772
Miao Xie17a9be22014-07-24 11:37:08 +08001773static inline int scrub_check_fsid(u8 fsid[],
1774 struct scrub_page *spage)
1775{
1776 struct btrfs_fs_devices *fs_devices = spage->dev->fs_devices;
1777 int ret;
1778
1779 ret = memcmp(fsid, fs_devices->fsid, BTRFS_UUID_SIZE);
1780 return !ret;
1781}
1782
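/*
 * Recompute the checksum state of a freshly re-read block: reset the
 * error flags and let the data or tree block checksum helper set them
 * again based on the current page contents.
 */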
Zhao Leiba7cf982015-08-24 21:18:02 +08001783static void scrub_recheck_block_checksum(struct scrub_block *sblock)
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001784{
Zhao Leiba7cf982015-08-24 21:18:02 +08001785 sblock->header_error = 0;
1786 sblock->checksum_error = 0;
1787 sblock->generation_error = 0;
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001788
Zhao Leiba7cf982015-08-24 21:18:02 +08001789 if (sblock->pagev[0]->flags & BTRFS_EXTENT_FLAG_DATA)
1790 scrub_checksum_data(sblock);
1791 else
1792 scrub_checksum_tree_block(sblock);
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001793}
1794
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001795static int scrub_repair_block_from_good_copy(struct scrub_block *sblock_bad,
Zhao Lei114ab502015-01-20 15:11:36 +08001796 struct scrub_block *sblock_good)
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001797{
1798 int page_num;
1799 int ret = 0;
1800
1801 for (page_num = 0; page_num < sblock_bad->page_count; page_num++) {
1802 int ret_sub;
1803
1804 ret_sub = scrub_repair_page_from_good_copy(sblock_bad,
1805 sblock_good,
Zhao Lei114ab502015-01-20 15:11:36 +08001806 page_num, 1);
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001807 if (ret_sub)
1808 ret = ret_sub;
1809 }
1810
1811 return ret;
1812}
1813
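/*
 * Copy a single page from the good mirror over the bad one by writing
 * it to the bad page's physical location. With force_write the page is
 * rewritten unconditionally, otherwise only if the block or the page
 * itself is known to be bad.
 */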
1814static int scrub_repair_page_from_good_copy(struct scrub_block *sblock_bad,
1815 struct scrub_block *sblock_good,
1816 int page_num, int force_write)
1817{
Stefan Behrens7a9e9982012-11-02 14:58:04 +01001818 struct scrub_page *page_bad = sblock_bad->pagev[page_num];
1819 struct scrub_page *page_good = sblock_good->pagev[page_num];
Jeff Mahoney0b246af2016-06-22 18:54:23 -04001820 struct btrfs_fs_info *fs_info = sblock_bad->sctx->fs_info;
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001821
Stefan Behrens7a9e9982012-11-02 14:58:04 +01001822 BUG_ON(page_bad->page == NULL);
1823 BUG_ON(page_good->page == NULL);
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001824 if (force_write || sblock_bad->header_error ||
1825 sblock_bad->checksum_error || page_bad->io_error) {
1826 struct bio *bio;
1827 int ret;
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001828
Stefan Behrensff023aa2012-11-06 11:43:11 +01001829 if (!page_bad->dev->bdev) {
Jeff Mahoney0b246af2016-06-22 18:54:23 -04001830 btrfs_warn_rl(fs_info,
Jeff Mahoney5d163e02016-09-20 10:05:00 -04001831 "scrub_repair_page_from_good_copy(bdev == NULL) is unexpected");
Stefan Behrensff023aa2012-11-06 11:43:11 +01001832 return -EIO;
1833 }
1834
Chris Mason9be33952013-05-17 18:30:14 -04001835 bio = btrfs_io_bio_alloc(GFP_NOFS, 1);
Tsutomu Itohe627ee72012-04-12 16:03:56 -04001836 if (!bio)
1837 return -EIO;
Stefan Behrens442a4f62012-05-25 16:06:08 +02001838 bio->bi_bdev = page_bad->dev->bdev;
Kent Overstreet4f024f32013-10-11 15:44:27 -07001839 bio->bi_iter.bi_sector = page_bad->physical >> 9;
Mike Christie37226b22016-06-05 14:31:52 -05001840 bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001841
1842 ret = bio_add_page(bio, page_good->page, PAGE_SIZE, 0);
1843 if (PAGE_SIZE != ret) {
1844 bio_put(bio);
1845 return -EIO;
1846 }
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001847
Mike Christie4e49ea42016-06-05 14:31:41 -05001848 if (btrfsic_submit_bio_wait(bio)) {
Stefan Behrens442a4f62012-05-25 16:06:08 +02001849 btrfs_dev_stat_inc_and_print(page_bad->dev,
1850 BTRFS_DEV_STAT_WRITE_ERRS);
Stefan Behrensff023aa2012-11-06 11:43:11 +01001851 btrfs_dev_replace_stats_inc(
Jeff Mahoney0b246af2016-06-22 18:54:23 -04001852 &fs_info->dev_replace.num_write_errors);
Stefan Behrens442a4f62012-05-25 16:06:08 +02001853 bio_put(bio);
1854 return -EIO;
1855 }
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001856 bio_put(bio);
1857 }
1858
1859 return 0;
1860}
1861
Stefan Behrensff023aa2012-11-06 11:43:11 +01001862static void scrub_write_block_to_dev_replace(struct scrub_block *sblock)
1863{
Jeff Mahoney0b246af2016-06-22 18:54:23 -04001864 struct btrfs_fs_info *fs_info = sblock->sctx->fs_info;
Stefan Behrensff023aa2012-11-06 11:43:11 +01001865 int page_num;
1866
Miao Xie5a6ac9e2014-11-06 17:20:58 +08001867 /*
1868 * This block is used for the check of the parity on the source device,
1869 * so the data needn't be written into the destination device.
1870 */
1871 if (sblock->sparity)
1872 return;
1873
Stefan Behrensff023aa2012-11-06 11:43:11 +01001874 for (page_num = 0; page_num < sblock->page_count; page_num++) {
1875 int ret;
1876
1877 ret = scrub_write_page_to_dev_replace(sblock, page_num);
1878 if (ret)
1879 btrfs_dev_replace_stats_inc(
Jeff Mahoney0b246af2016-06-22 18:54:23 -04001880 &fs_info->dev_replace.num_write_errors);
Stefan Behrensff023aa2012-11-06 11:43:11 +01001881 }
1882}
1883
1884static int scrub_write_page_to_dev_replace(struct scrub_block *sblock,
1885 int page_num)
1886{
1887 struct scrub_page *spage = sblock->pagev[page_num];
1888
1889 BUG_ON(spage->page == NULL);
1890 if (spage->io_error) {
1891 void *mapped_buffer = kmap_atomic(spage->page);
1892
David Sterba619a9742017-03-29 20:48:44 +02001893 clear_page(mapped_buffer);
Stefan Behrensff023aa2012-11-06 11:43:11 +01001894 flush_dcache_page(spage->page);
1895 kunmap_atomic(mapped_buffer);
1896 }
1897 return scrub_add_page_to_wr_bio(sblock->sctx, spage);
1898}
1899
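/*
 * Queue one page for writing to the dev-replace target. Pages are
 * batched into wr_curr_bio as long as they stay physically and
 * logically contiguous; a full or discontiguous bio is submitted via
 * scrub_wr_submit() and a fresh one is started.
 */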
1900static int scrub_add_page_to_wr_bio(struct scrub_ctx *sctx,
1901 struct scrub_page *spage)
1902{
1903 struct scrub_wr_ctx *wr_ctx = &sctx->wr_ctx;
1904 struct scrub_bio *sbio;
1905 int ret;
1906
1907 mutex_lock(&wr_ctx->wr_lock);
1908again:
1909 if (!wr_ctx->wr_curr_bio) {
1910 wr_ctx->wr_curr_bio = kzalloc(sizeof(*wr_ctx->wr_curr_bio),
David Sterba58c4e172016-02-11 10:49:42 +01001911 GFP_KERNEL);
Stefan Behrensff023aa2012-11-06 11:43:11 +01001912 if (!wr_ctx->wr_curr_bio) {
1913 mutex_unlock(&wr_ctx->wr_lock);
1914 return -ENOMEM;
1915 }
1916 wr_ctx->wr_curr_bio->sctx = sctx;
1917 wr_ctx->wr_curr_bio->page_count = 0;
1918 }
1919 sbio = wr_ctx->wr_curr_bio;
1920 if (sbio->page_count == 0) {
1921 struct bio *bio;
1922
1923 sbio->physical = spage->physical_for_dev_replace;
1924 sbio->logical = spage->logical;
1925 sbio->dev = wr_ctx->tgtdev;
1926 bio = sbio->bio;
1927 if (!bio) {
David Sterba58c4e172016-02-11 10:49:42 +01001928 bio = btrfs_io_bio_alloc(GFP_KERNEL,
1929 wr_ctx->pages_per_wr_bio);
Stefan Behrensff023aa2012-11-06 11:43:11 +01001930 if (!bio) {
1931 mutex_unlock(&wr_ctx->wr_lock);
1932 return -ENOMEM;
1933 }
1934 sbio->bio = bio;
1935 }
1936
1937 bio->bi_private = sbio;
1938 bio->bi_end_io = scrub_wr_bio_end_io;
1939 bio->bi_bdev = sbio->dev->bdev;
Kent Overstreet4f024f32013-10-11 15:44:27 -07001940 bio->bi_iter.bi_sector = sbio->physical >> 9;
Mike Christie37226b22016-06-05 14:31:52 -05001941 bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
Stefan Behrensff023aa2012-11-06 11:43:11 +01001942 sbio->err = 0;
1943 } else if (sbio->physical + sbio->page_count * PAGE_SIZE !=
1944 spage->physical_for_dev_replace ||
1945 sbio->logical + sbio->page_count * PAGE_SIZE !=
1946 spage->logical) {
1947 scrub_wr_submit(sctx);
1948 goto again;
1949 }
1950
1951 ret = bio_add_page(sbio->bio, spage->page, PAGE_SIZE, 0);
1952 if (ret != PAGE_SIZE) {
1953 if (sbio->page_count < 1) {
1954 bio_put(sbio->bio);
1955 sbio->bio = NULL;
1956 mutex_unlock(&wr_ctx->wr_lock);
1957 return -EIO;
1958 }
1959 scrub_wr_submit(sctx);
1960 goto again;
1961 }
1962
1963 sbio->pagev[sbio->page_count] = spage;
1964 scrub_page_get(spage);
1965 sbio->page_count++;
1966 if (sbio->page_count == wr_ctx->pages_per_wr_bio)
1967 scrub_wr_submit(sctx);
1968 mutex_unlock(&wr_ctx->wr_lock);
1969
1970 return 0;
1971}
1972
1973static void scrub_wr_submit(struct scrub_ctx *sctx)
1974{
1975 struct scrub_wr_ctx *wr_ctx = &sctx->wr_ctx;
1976 struct scrub_bio *sbio;
1977
1978 if (!wr_ctx->wr_curr_bio)
1979 return;
1980
1981 sbio = wr_ctx->wr_curr_bio;
1982 wr_ctx->wr_curr_bio = NULL;
1983 WARN_ON(!sbio->bio->bi_bdev);
1984 scrub_pending_bio_inc(sctx);
1985 /* Process all writes in a single worker thread. Then the block layer
1986 * orders the requests before sending them to the driver, which
1987 * doubled the write performance on spinning disks when measured
1988 * with Linux 3.5. */
Mike Christie4e49ea42016-06-05 14:31:41 -05001989 btrfsic_submit_bio(sbio->bio);
Stefan Behrensff023aa2012-11-06 11:43:11 +01001990}
1991
Christoph Hellwig4246a0b2015-07-20 15:29:37 +02001992static void scrub_wr_bio_end_io(struct bio *bio)
Stefan Behrensff023aa2012-11-06 11:43:11 +01001993{
1994 struct scrub_bio *sbio = bio->bi_private;
Jeff Mahoneyfb456252016-06-22 18:54:56 -04001995 struct btrfs_fs_info *fs_info = sbio->dev->fs_info;
Stefan Behrensff023aa2012-11-06 11:43:11 +01001996
Christoph Hellwig4246a0b2015-07-20 15:29:37 +02001997 sbio->err = bio->bi_error;
Stefan Behrensff023aa2012-11-06 11:43:11 +01001998 sbio->bio = bio;
1999
Liu Bo9e0af232014-08-15 23:36:53 +08002000 btrfs_init_work(&sbio->work, btrfs_scrubwrc_helper,
2001 scrub_wr_bio_end_io_worker, NULL, NULL);
Qu Wenruo0339ef22014-02-28 10:46:17 +08002002 btrfs_queue_work(fs_info->scrub_wr_completion_workers, &sbio->work);
Stefan Behrensff023aa2012-11-06 11:43:11 +01002003}
2004
2005static void scrub_wr_bio_end_io_worker(struct btrfs_work *work)
2006{
2007 struct scrub_bio *sbio = container_of(work, struct scrub_bio, work);
2008 struct scrub_ctx *sctx = sbio->sctx;
2009 int i;
2010
2011 WARN_ON(sbio->page_count > SCRUB_PAGES_PER_WR_BIO);
2012 if (sbio->err) {
2013 struct btrfs_dev_replace *dev_replace =
Jeff Mahoneyfb456252016-06-22 18:54:56 -04002014 &sbio->sctx->fs_info->dev_replace;
Stefan Behrensff023aa2012-11-06 11:43:11 +01002015
2016 for (i = 0; i < sbio->page_count; i++) {
2017 struct scrub_page *spage = sbio->pagev[i];
2018
2019 spage->io_error = 1;
2020 btrfs_dev_replace_stats_inc(&dev_replace->
2021 num_write_errors);
2022 }
2023 }
2024
2025 for (i = 0; i < sbio->page_count; i++)
2026 scrub_page_put(sbio->pagev[i]);
2027
2028 bio_put(sbio->bio);
2029 kfree(sbio);
2030 scrub_pending_bio_dec(sctx);
2031}
2032
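/*
 * Verify a block according to its type. Data and tree blocks that fail
 * verification are handed to scrub_handle_errored_block() for repair;
 * for super blocks the result is only counted in the stats, since they
 * are rewritten by every transaction commit anyway.
 */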
2033static int scrub_checksum(struct scrub_block *sblock)
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002034{
2035 u64 flags;
2036 int ret;
2037
Zhao Leiba7cf982015-08-24 21:18:02 +08002038 /*
2039 * No need to initialize these stats currently,
2040 * because this function only uses the return value
2041 * instead of these stats values.
2042 *
2043 * Todo:
2044 * always use stats
2045 */
2046 sblock->header_error = 0;
2047 sblock->generation_error = 0;
2048 sblock->checksum_error = 0;
2049
Stefan Behrens7a9e9982012-11-02 14:58:04 +01002050 WARN_ON(sblock->page_count < 1);
2051 flags = sblock->pagev[0]->flags;
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002052 ret = 0;
2053 if (flags & BTRFS_EXTENT_FLAG_DATA)
2054 ret = scrub_checksum_data(sblock);
2055 else if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK)
2056 ret = scrub_checksum_tree_block(sblock);
2057 else if (flags & BTRFS_EXTENT_FLAG_SUPER)
2058 (void)scrub_checksum_super(sblock);
2059 else
2060 WARN_ON(1);
2061 if (ret)
2062 scrub_handle_errored_block(sblock);
Stefan Behrensff023aa2012-11-06 11:43:11 +01002063
2064 return ret;
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002065}
2066
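/*
 * Checksum a data block: run the checksum over the contents page by
 * page (a block may span several pages), finalize the crc and compare
 * it against the expected value saved in pagev[0]->csum.
 */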
2067static int scrub_checksum_data(struct scrub_block *sblock)
2068{
Stefan Behrensd9d181c2012-11-02 09:58:09 +01002069 struct scrub_ctx *sctx = sblock->sctx;
Arne Jansena2de7332011-03-08 14:14:00 +01002070 u8 csum[BTRFS_CSUM_SIZE];
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002071 u8 *on_disk_csum;
2072 struct page *page;
2073 void *buffer;
Arne Jansena2de7332011-03-08 14:14:00 +01002074 u32 crc = ~(u32)0;
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002075 u64 len;
2076 int index;
Arne Jansena2de7332011-03-08 14:14:00 +01002077
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002078 BUG_ON(sblock->page_count < 1);
Stefan Behrens7a9e9982012-11-02 14:58:04 +01002079 if (!sblock->pagev[0]->have_csum)
Arne Jansena2de7332011-03-08 14:14:00 +01002080 return 0;
2081
Stefan Behrens7a9e9982012-11-02 14:58:04 +01002082 on_disk_csum = sblock->pagev[0]->csum;
2083 page = sblock->pagev[0]->page;
Linus Torvalds9613beb2012-03-30 12:44:29 -07002084 buffer = kmap_atomic(page);
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002085
Stefan Behrensd9d181c2012-11-02 09:58:09 +01002086 len = sctx->sectorsize;
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002087 index = 0;
2088 for (;;) {
2089 u64 l = min_t(u64, len, PAGE_SIZE);
2090
Liu Bob0496682013-03-14 14:57:45 +00002091 crc = btrfs_csum_data(buffer, crc, l);
Linus Torvalds9613beb2012-03-30 12:44:29 -07002092 kunmap_atomic(buffer);
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002093 len -= l;
2094 if (len == 0)
2095 break;
2096 index++;
2097 BUG_ON(index >= sblock->page_count);
Stefan Behrens7a9e9982012-11-02 14:58:04 +01002098 BUG_ON(!sblock->pagev[index]->page);
2099 page = sblock->pagev[index]->page;
Linus Torvalds9613beb2012-03-30 12:44:29 -07002100 buffer = kmap_atomic(page);
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002101 }
2102
Arne Jansena2de7332011-03-08 14:14:00 +01002103 btrfs_csum_final(crc, csum);
Stefan Behrensd9d181c2012-11-02 09:58:09 +01002104 if (memcmp(csum, on_disk_csum, sctx->csum_size))
Zhao Leiba7cf982015-08-24 21:18:02 +08002105 sblock->checksum_error = 1;
Arne Jansena2de7332011-03-08 14:14:00 +01002106
Zhao Leiba7cf982015-08-24 21:18:02 +08002107 return sblock->checksum_error;
Arne Jansena2de7332011-03-08 14:14:00 +01002108}
2109
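/*
 * Verify a tree block: check that the header's bytenr, generation,
 * fsid and chunk tree uuid match what scrub expects, then checksum
 * everything behind the header's csum field and compare against the
 * stored value.
 */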
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002110static int scrub_checksum_tree_block(struct scrub_block *sblock)
Arne Jansena2de7332011-03-08 14:14:00 +01002111{
Stefan Behrensd9d181c2012-11-02 09:58:09 +01002112 struct scrub_ctx *sctx = sblock->sctx;
Arne Jansena2de7332011-03-08 14:14:00 +01002113 struct btrfs_header *h;
Jeff Mahoney0b246af2016-06-22 18:54:23 -04002114 struct btrfs_fs_info *fs_info = sctx->fs_info;
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002115 u8 calculated_csum[BTRFS_CSUM_SIZE];
2116 u8 on_disk_csum[BTRFS_CSUM_SIZE];
2117 struct page *page;
2118 void *mapped_buffer;
2119 u64 mapped_size;
2120 void *p;
Arne Jansena2de7332011-03-08 14:14:00 +01002121 u32 crc = ~(u32)0;
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002122 u64 len;
2123 int index;
2124
2125 BUG_ON(sblock->page_count < 1);
Stefan Behrens7a9e9982012-11-02 14:58:04 +01002126 page = sblock->pagev[0]->page;
Linus Torvalds9613beb2012-03-30 12:44:29 -07002127 mapped_buffer = kmap_atomic(page);
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002128 h = (struct btrfs_header *)mapped_buffer;
Stefan Behrensd9d181c2012-11-02 09:58:09 +01002129 memcpy(on_disk_csum, h->csum, sctx->csum_size);
Arne Jansena2de7332011-03-08 14:14:00 +01002130
2131 /*
2132 * we don't use the getter functions here, as we
2133 * a) don't have an extent buffer and
2134 * b) the page is already kmapped
2135 */
Qu Wenruo3cae2102013-07-16 11:19:18 +08002136 if (sblock->pagev[0]->logical != btrfs_stack_header_bytenr(h))
Zhao Leiba7cf982015-08-24 21:18:02 +08002137 sblock->header_error = 1;
Arne Jansena2de7332011-03-08 14:14:00 +01002138
Zhao Leiba7cf982015-08-24 21:18:02 +08002139 if (sblock->pagev[0]->generation != btrfs_stack_header_generation(h)) {
2140 sblock->header_error = 1;
2141 sblock->generation_error = 1;
2142 }
Arne Jansena2de7332011-03-08 14:14:00 +01002143
Miao Xie17a9be22014-07-24 11:37:08 +08002144 if (!scrub_check_fsid(h->fsid, sblock->pagev[0]))
Zhao Leiba7cf982015-08-24 21:18:02 +08002145 sblock->header_error = 1;
Arne Jansena2de7332011-03-08 14:14:00 +01002146
2147 if (memcmp(h->chunk_tree_uuid, fs_info->chunk_tree_uuid,
2148 BTRFS_UUID_SIZE))
Zhao Leiba7cf982015-08-24 21:18:02 +08002149 sblock->header_error = 1;
Arne Jansena2de7332011-03-08 14:14:00 +01002150
Stefan Behrensd9d181c2012-11-02 09:58:09 +01002151 len = sctx->nodesize - BTRFS_CSUM_SIZE;
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002152 mapped_size = PAGE_SIZE - BTRFS_CSUM_SIZE;
2153 p = ((u8 *)mapped_buffer) + BTRFS_CSUM_SIZE;
2154 index = 0;
2155 for (;;) {
2156 u64 l = min_t(u64, len, mapped_size);
2157
Liu Bob0496682013-03-14 14:57:45 +00002158 crc = btrfs_csum_data(p, crc, l);
Linus Torvalds9613beb2012-03-30 12:44:29 -07002159 kunmap_atomic(mapped_buffer);
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002160 len -= l;
2161 if (len == 0)
2162 break;
2163 index++;
2164 BUG_ON(index >= sblock->page_count);
Stefan Behrens7a9e9982012-11-02 14:58:04 +01002165 BUG_ON(!sblock->pagev[index]->page);
2166 page = sblock->pagev[index]->page;
Linus Torvalds9613beb2012-03-30 12:44:29 -07002167 mapped_buffer = kmap_atomic(page);
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002168 mapped_size = PAGE_SIZE;
2169 p = mapped_buffer;
2170 }
2171
2172 btrfs_csum_final(crc, calculated_csum);
Stefan Behrensd9d181c2012-11-02 09:58:09 +01002173 if (memcmp(calculated_csum, on_disk_csum, sctx->csum_size))
Zhao Leiba7cf982015-08-24 21:18:02 +08002174 sblock->checksum_error = 1;
Arne Jansena2de7332011-03-08 14:14:00 +01002175
Zhao Leiba7cf982015-08-24 21:18:02 +08002176 return sblock->header_error || sblock->checksum_error;
Arne Jansena2de7332011-03-08 14:14:00 +01002177}
2178
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002179static int scrub_checksum_super(struct scrub_block *sblock)
Arne Jansena2de7332011-03-08 14:14:00 +01002180{
2181 struct btrfs_super_block *s;
Stefan Behrensd9d181c2012-11-02 09:58:09 +01002182 struct scrub_ctx *sctx = sblock->sctx;
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002183 u8 calculated_csum[BTRFS_CSUM_SIZE];
2184 u8 on_disk_csum[BTRFS_CSUM_SIZE];
2185 struct page *page;
2186 void *mapped_buffer;
2187 u64 mapped_size;
2188 void *p;
Arne Jansena2de7332011-03-08 14:14:00 +01002189 u32 crc = ~(u32)0;
Stefan Behrens442a4f62012-05-25 16:06:08 +02002190 int fail_gen = 0;
2191 int fail_cor = 0;
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002192 u64 len;
2193 int index;
Arne Jansena2de7332011-03-08 14:14:00 +01002194
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002195 BUG_ON(sblock->page_count < 1);
Stefan Behrens7a9e9982012-11-02 14:58:04 +01002196 page = sblock->pagev[0]->page;
Linus Torvalds9613beb2012-03-30 12:44:29 -07002197 mapped_buffer = kmap_atomic(page);
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002198 s = (struct btrfs_super_block *)mapped_buffer;
Stefan Behrensd9d181c2012-11-02 09:58:09 +01002199 memcpy(on_disk_csum, s->csum, sctx->csum_size);
Arne Jansena2de7332011-03-08 14:14:00 +01002200
Qu Wenruo3cae2102013-07-16 11:19:18 +08002201 if (sblock->pagev[0]->logical != btrfs_super_bytenr(s))
Stefan Behrens442a4f62012-05-25 16:06:08 +02002202 ++fail_cor;
Arne Jansena2de7332011-03-08 14:14:00 +01002203
Qu Wenruo3cae2102013-07-16 11:19:18 +08002204 if (sblock->pagev[0]->generation != btrfs_super_generation(s))
Stefan Behrens442a4f62012-05-25 16:06:08 +02002205 ++fail_gen;
Arne Jansena2de7332011-03-08 14:14:00 +01002206
Miao Xie17a9be22014-07-24 11:37:08 +08002207 if (!scrub_check_fsid(s->fsid, sblock->pagev[0]))
Stefan Behrens442a4f62012-05-25 16:06:08 +02002208 ++fail_cor;
Arne Jansena2de7332011-03-08 14:14:00 +01002209
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002210 len = BTRFS_SUPER_INFO_SIZE - BTRFS_CSUM_SIZE;
2211 mapped_size = PAGE_SIZE - BTRFS_CSUM_SIZE;
2212 p = ((u8 *)mapped_buffer) + BTRFS_CSUM_SIZE;
2213 index = 0;
2214 for (;;) {
2215 u64 l = min_t(u64, len, mapped_size);
2216
Liu Bob0496682013-03-14 14:57:45 +00002217 crc = btrfs_csum_data(p, crc, l);
Linus Torvalds9613beb2012-03-30 12:44:29 -07002218 kunmap_atomic(mapped_buffer);
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002219 len -= l;
2220 if (len == 0)
2221 break;
2222 index++;
2223 BUG_ON(index >= sblock->page_count);
Stefan Behrens7a9e9982012-11-02 14:58:04 +01002224 BUG_ON(!sblock->pagev[index]->page);
2225 page = sblock->pagev[index]->page;
Linus Torvalds9613beb2012-03-30 12:44:29 -07002226 mapped_buffer = kmap_atomic(page);
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002227 mapped_size = PAGE_SIZE;
2228 p = mapped_buffer;
2229 }
2230
2231 btrfs_csum_final(crc, calculated_csum);
Stefan Behrensd9d181c2012-11-02 09:58:09 +01002232 if (memcmp(calculated_csum, on_disk_csum, sctx->csum_size))
Stefan Behrens442a4f62012-05-25 16:06:08 +02002233 ++fail_cor;
Arne Jansena2de7332011-03-08 14:14:00 +01002234
Stefan Behrens442a4f62012-05-25 16:06:08 +02002235 if (fail_cor + fail_gen) {
Arne Jansena2de7332011-03-08 14:14:00 +01002236 /*
2237 * If we find an error in a super block, we just report it.
2238 * Super blocks get rewritten with the next transaction commit
2239 * anyway.
2240 */
Stefan Behrensd9d181c2012-11-02 09:58:09 +01002241 spin_lock(&sctx->stat_lock);
2242 ++sctx->stat.super_errors;
2243 spin_unlock(&sctx->stat_lock);
Stefan Behrens442a4f62012-05-25 16:06:08 +02002244 if (fail_cor)
Stefan Behrens7a9e9982012-11-02 14:58:04 +01002245 btrfs_dev_stat_inc_and_print(sblock->pagev[0]->dev,
Stefan Behrens442a4f62012-05-25 16:06:08 +02002246 BTRFS_DEV_STAT_CORRUPTION_ERRS);
2247 else
Stefan Behrens7a9e9982012-11-02 14:58:04 +01002248 btrfs_dev_stat_inc_and_print(sblock->pagev[0]->dev,
Stefan Behrens442a4f62012-05-25 16:06:08 +02002249 BTRFS_DEV_STAT_GENERATION_ERRS);
Arne Jansena2de7332011-03-08 14:14:00 +01002250 }
2251
Stefan Behrens442a4f62012-05-25 16:06:08 +02002252 return fail_cor + fail_gen;
Arne Jansena2de7332011-03-08 14:14:00 +01002253}
2254
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002255static void scrub_block_get(struct scrub_block *sblock)
2256{
Elena Reshetova186debd2017-03-03 10:55:23 +02002257 refcount_inc(&sblock->refs);
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002258}
2259
2260static void scrub_block_put(struct scrub_block *sblock)
2261{
Elena Reshetova186debd2017-03-03 10:55:23 +02002262 if (refcount_dec_and_test(&sblock->refs)) {
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002263 int i;
2264
Miao Xie5a6ac9e2014-11-06 17:20:58 +08002265 if (sblock->sparity)
2266 scrub_parity_put(sblock->sparity);
2267
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002268 for (i = 0; i < sblock->page_count; i++)
Stefan Behrens7a9e9982012-11-02 14:58:04 +01002269 scrub_page_put(sblock->pagev[i]);
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002270 kfree(sblock);
2271 }
2272}
2273
Stefan Behrens7a9e9982012-11-02 14:58:04 +01002274static void scrub_page_get(struct scrub_page *spage)
2275{
Zhao Lei57019342015-01-20 15:11:45 +08002276 atomic_inc(&spage->refs);
Stefan Behrens7a9e9982012-11-02 14:58:04 +01002277}
2278
2279static void scrub_page_put(struct scrub_page *spage)
2280{
Zhao Lei57019342015-01-20 15:11:45 +08002281 if (atomic_dec_and_test(&spage->refs)) {
Stefan Behrens7a9e9982012-11-02 14:58:04 +01002282 if (spage->page)
2283 __free_page(spage->page);
2284 kfree(spage);
2285 }
2286}
2287
Stefan Behrensd9d181c2012-11-02 09:58:09 +01002288static void scrub_submit(struct scrub_ctx *sctx)
Arne Jansena2de7332011-03-08 14:14:00 +01002289{
2290 struct scrub_bio *sbio;
2291
Stefan Behrensd9d181c2012-11-02 09:58:09 +01002292 if (sctx->curr == -1)
Stefan Behrens1623ede2012-03-27 14:21:26 -04002293 return;
Arne Jansena2de7332011-03-08 14:14:00 +01002294
Stefan Behrensd9d181c2012-11-02 09:58:09 +01002295 sbio = sctx->bios[sctx->curr];
2296 sctx->curr = -1;
Stefan Behrensb6bfebc2012-11-02 16:44:58 +01002297 scrub_pending_bio_inc(sctx);
Mike Christie4e49ea42016-06-05 14:31:41 -05002298 btrfsic_submit_bio(sbio->bio);
Arne Jansena2de7332011-03-08 14:14:00 +01002299}
2300
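/*
 * Read-side counterpart of scrub_add_page_to_wr_bio(): batch pages
 * into the current read bio while they are contiguous on the same
 * device, submitting and starting over whenever the stream breaks or
 * the bio is full.
 */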
Stefan Behrensff023aa2012-11-06 11:43:11 +01002301static int scrub_add_page_to_rd_bio(struct scrub_ctx *sctx,
2302 struct scrub_page *spage)
Arne Jansena2de7332011-03-08 14:14:00 +01002303{
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002304 struct scrub_block *sblock = spage->sblock;
Arne Jansena2de7332011-03-08 14:14:00 +01002305 struct scrub_bio *sbio;
Arne Jansen69f4cb52011-11-11 08:17:10 -05002306 int ret;
Arne Jansena2de7332011-03-08 14:14:00 +01002307
2308again:
2309 /*
2310 * grab a fresh bio or wait for one to become available
2311 */
Stefan Behrensd9d181c2012-11-02 09:58:09 +01002312 while (sctx->curr == -1) {
2313 spin_lock(&sctx->list_lock);
2314 sctx->curr = sctx->first_free;
2315 if (sctx->curr != -1) {
2316 sctx->first_free = sctx->bios[sctx->curr]->next_free;
2317 sctx->bios[sctx->curr]->next_free = -1;
2318 sctx->bios[sctx->curr]->page_count = 0;
2319 spin_unlock(&sctx->list_lock);
Arne Jansena2de7332011-03-08 14:14:00 +01002320 } else {
Stefan Behrensd9d181c2012-11-02 09:58:09 +01002321 spin_unlock(&sctx->list_lock);
2322 wait_event(sctx->list_wait, sctx->first_free != -1);
Arne Jansena2de7332011-03-08 14:14:00 +01002323 }
2324 }
Stefan Behrensd9d181c2012-11-02 09:58:09 +01002325 sbio = sctx->bios[sctx->curr];
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002326 if (sbio->page_count == 0) {
Arne Jansen69f4cb52011-11-11 08:17:10 -05002327 struct bio *bio;
2328
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002329 sbio->physical = spage->physical;
2330 sbio->logical = spage->logical;
Stefan Behrensa36cf8b2012-11-02 13:26:57 +01002331 sbio->dev = spage->dev;
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002332 bio = sbio->bio;
2333 if (!bio) {
David Sterba58c4e172016-02-11 10:49:42 +01002334 bio = btrfs_io_bio_alloc(GFP_KERNEL,
2335 sctx->pages_per_rd_bio);
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002336 if (!bio)
2337 return -ENOMEM;
2338 sbio->bio = bio;
2339 }
Arne Jansen69f4cb52011-11-11 08:17:10 -05002340
2341 bio->bi_private = sbio;
2342 bio->bi_end_io = scrub_bio_end_io;
Stefan Behrensa36cf8b2012-11-02 13:26:57 +01002343 bio->bi_bdev = sbio->dev->bdev;
Kent Overstreet4f024f32013-10-11 15:44:27 -07002344 bio->bi_iter.bi_sector = sbio->physical >> 9;
Mike Christie37226b22016-06-05 14:31:52 -05002345 bio_set_op_attrs(bio, REQ_OP_READ, 0);
Arne Jansen69f4cb52011-11-11 08:17:10 -05002346 sbio->err = 0;
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002347 } else if (sbio->physical + sbio->page_count * PAGE_SIZE !=
2348 spage->physical ||
2349 sbio->logical + sbio->page_count * PAGE_SIZE !=
Stefan Behrensa36cf8b2012-11-02 13:26:57 +01002350 spage->logical ||
2351 sbio->dev != spage->dev) {
Stefan Behrensd9d181c2012-11-02 09:58:09 +01002352 scrub_submit(sctx);
Arne Jansen69f4cb52011-11-11 08:17:10 -05002353 goto again;
2354 }
2355
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002356 sbio->pagev[sbio->page_count] = spage;
2357 ret = bio_add_page(sbio->bio, spage->page, PAGE_SIZE, 0);
2358 if (ret != PAGE_SIZE) {
2359 if (sbio->page_count < 1) {
2360 bio_put(sbio->bio);
2361 sbio->bio = NULL;
2362 return -EIO;
2363 }
Stefan Behrensd9d181c2012-11-02 09:58:09 +01002364 scrub_submit(sctx);
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002365 goto again;
Arne Jansena2de7332011-03-08 14:14:00 +01002366 }
Arne Jansen1bc87792011-05-28 21:57:55 +02002367
Stefan Behrensff023aa2012-11-06 11:43:11 +01002368 scrub_block_get(sblock); /* one for the page added to the bio */
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002369 atomic_inc(&sblock->outstanding_pages);
2370 sbio->page_count++;
Stefan Behrensff023aa2012-11-06 11:43:11 +01002371 if (sbio->page_count == sctx->pages_per_rd_bio)
Stefan Behrensd9d181c2012-11-02 09:58:09 +01002372 scrub_submit(sctx);
Arne Jansena2de7332011-03-08 14:14:00 +01002373
2374 return 0;
2375}
2376
Linus Torvalds22365972015-09-05 15:14:43 -07002377static void scrub_missing_raid56_end_io(struct bio *bio)
Omar Sandoval73ff61d2015-06-19 11:52:51 -07002378{
2379 struct scrub_block *sblock = bio->bi_private;
Jeff Mahoneyfb456252016-06-22 18:54:56 -04002380 struct btrfs_fs_info *fs_info = sblock->sctx->fs_info;
Omar Sandoval73ff61d2015-06-19 11:52:51 -07002381
Linus Torvalds22365972015-09-05 15:14:43 -07002382 if (bio->bi_error)
Omar Sandoval73ff61d2015-06-19 11:52:51 -07002383 sblock->no_io_error_seen = 0;
2384
Scott Talbert46732722016-05-09 09:14:28 -04002385 bio_put(bio);
2386
Omar Sandoval73ff61d2015-06-19 11:52:51 -07002387 btrfs_queue_work(fs_info->scrub_workers, &sblock->work);
2388}
2389
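/*
 * Runs after the raid56 code has tried to rebuild the pages of a block
 * that sits on a missing device: verify the result, account any
 * remaining errors and write the rebuilt data to the dev-replace
 * target on success.
 */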
2390static void scrub_missing_raid56_worker(struct btrfs_work *work)
2391{
2392 struct scrub_block *sblock = container_of(work, struct scrub_block, work);
2393 struct scrub_ctx *sctx = sblock->sctx;
Jeff Mahoney0b246af2016-06-22 18:54:23 -04002394 struct btrfs_fs_info *fs_info = sctx->fs_info;
Omar Sandoval73ff61d2015-06-19 11:52:51 -07002395 u64 logical;
2396 struct btrfs_device *dev;
2397
Omar Sandoval73ff61d2015-06-19 11:52:51 -07002398 logical = sblock->pagev[0]->logical;
2399 dev = sblock->pagev[0]->dev;
2400
Zhao Leiaffe4a52015-08-24 21:32:06 +08002401 if (sblock->no_io_error_seen)
Zhao Leiba7cf982015-08-24 21:18:02 +08002402 scrub_recheck_block_checksum(sblock);
Omar Sandoval73ff61d2015-06-19 11:52:51 -07002403
2404 if (!sblock->no_io_error_seen) {
2405 spin_lock(&sctx->stat_lock);
2406 sctx->stat.read_errors++;
2407 spin_unlock(&sctx->stat_lock);
Jeff Mahoney0b246af2016-06-22 18:54:23 -04002408 btrfs_err_rl_in_rcu(fs_info,
David Sterbab14af3b2015-10-08 10:43:10 +02002409 "IO error rebuilding logical %llu for dev %s",
Omar Sandoval73ff61d2015-06-19 11:52:51 -07002410 logical, rcu_str_deref(dev->name));
2411 } else if (sblock->header_error || sblock->checksum_error) {
2412 spin_lock(&sctx->stat_lock);
2413 sctx->stat.uncorrectable_errors++;
2414 spin_unlock(&sctx->stat_lock);
Jeff Mahoney0b246af2016-06-22 18:54:23 -04002415 btrfs_err_rl_in_rcu(fs_info,
David Sterbab14af3b2015-10-08 10:43:10 +02002416 "failed to rebuild valid logical %llu for dev %s",
Omar Sandoval73ff61d2015-06-19 11:52:51 -07002417 logical, rcu_str_deref(dev->name));
2418 } else {
2419 scrub_write_block_to_dev_replace(sblock);
2420 }
2421
2422 scrub_block_put(sblock);
2423
2424 if (sctx->is_dev_replace &&
2425 atomic_read(&sctx->wr_ctx.flush_all_writes)) {
2426 mutex_lock(&sctx->wr_ctx.wr_lock);
2427 scrub_wr_submit(sctx);
2428 mutex_unlock(&sctx->wr_ctx.wr_lock);
2429 }
2430
2431 scrub_pending_bio_dec(sctx);
2432}
2433
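/*
 * The device is missing, but the extent sits on RAID5/6, so the data
 * can still be rebuilt from the remaining stripes: allocate a
 * "missing" rbio, attach the block's pages to it and let the raid56
 * code reconstruct them; scrub_missing_raid56_worker() then checks and
 * writes out the result.
 */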
2434static void scrub_missing_raid56_pages(struct scrub_block *sblock)
2435{
2436 struct scrub_ctx *sctx = sblock->sctx;
Jeff Mahoneyfb456252016-06-22 18:54:56 -04002437 struct btrfs_fs_info *fs_info = sctx->fs_info;
Omar Sandoval73ff61d2015-06-19 11:52:51 -07002438 u64 length = sblock->page_count * PAGE_SIZE;
2439 u64 logical = sblock->pagev[0]->logical;
Zhao Leif1fee652016-05-17 17:37:38 +08002440 struct btrfs_bio *bbio = NULL;
Omar Sandoval73ff61d2015-06-19 11:52:51 -07002441 struct bio *bio;
2442 struct btrfs_raid_bio *rbio;
2443 int ret;
2444 int i;
2445
Qu Wenruoae6529c2017-03-29 09:33:21 +08002446 btrfs_bio_counter_inc_blocked(fs_info);
Christoph Hellwigcf8cddd2016-10-27 09:27:36 +02002447 ret = btrfs_map_sblock(fs_info, BTRFS_MAP_GET_READ_MIRRORS, logical,
David Sterba825ad4c2017-03-28 14:45:22 +02002448 &length, &bbio);
Omar Sandoval73ff61d2015-06-19 11:52:51 -07002449 if (ret || !bbio || !bbio->raid_map)
2450 goto bbio_out;
2451
2452 if (WARN_ON(!sctx->is_dev_replace ||
2453 !(bbio->map_type & BTRFS_BLOCK_GROUP_RAID56_MASK))) {
2454 /*
2455 * We shouldn't be scrubbing a missing device. Even for dev
2456 * replace, we should only get here for RAID 5/6. We either
2457 * managed to mount something with no mirrors remaining or
2458 * there's a bug in scrub_remap_extent()/btrfs_map_block().
2459 */
2460 goto bbio_out;
2461 }
2462
2463 bio = btrfs_io_bio_alloc(GFP_NOFS, 0);
2464 if (!bio)
2465 goto bbio_out;
2466
2467 bio->bi_iter.bi_sector = logical >> 9;
2468 bio->bi_private = sblock;
2469 bio->bi_end_io = scrub_missing_raid56_end_io;
2470
Jeff Mahoney2ff7e612016-06-22 18:54:24 -04002471 rbio = raid56_alloc_missing_rbio(fs_info, bio, bbio, length);
Omar Sandoval73ff61d2015-06-19 11:52:51 -07002472 if (!rbio)
2473 goto rbio_out;
2474
2475 for (i = 0; i < sblock->page_count; i++) {
2476 struct scrub_page *spage = sblock->pagev[i];
2477
2478 raid56_add_scrub_pages(rbio, spage->page, spage->logical);
2479 }
2480
2481 btrfs_init_work(&sblock->work, btrfs_scrub_helper,
2482 scrub_missing_raid56_worker, NULL, NULL);
2483 scrub_block_get(sblock);
2484 scrub_pending_bio_inc(sctx);
2485 raid56_submit_missing_rbio(rbio);
2486 return;
2487
2488rbio_out:
2489 bio_put(bio);
2490bbio_out:
Qu Wenruoae6529c2017-03-29 09:33:21 +08002491 btrfs_bio_counter_dec(fs_info);
Omar Sandoval73ff61d2015-06-19 11:52:51 -07002492 btrfs_put_bbio(bbio);
2493 spin_lock(&sctx->stat_lock);
2494 sctx->stat.malloc_errors++;
2495 spin_unlock(&sctx->stat_lock);
2496}
2497
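/*
 * Create a scrub_block for [logical, logical + len) and split it into
 * PAGE_SIZE scrub_pages. Blocks on a missing device are routed through
 * the RAID5/6 rebuild path above; everything else is queued onto the
 * current read bio page by page.
 */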
Stefan Behrensd9d181c2012-11-02 09:58:09 +01002498static int scrub_pages(struct scrub_ctx *sctx, u64 logical, u64 len,
Stefan Behrensa36cf8b2012-11-02 13:26:57 +01002499 u64 physical, struct btrfs_device *dev, u64 flags,
Stefan Behrensff023aa2012-11-06 11:43:11 +01002500 u64 gen, int mirror_num, u8 *csum, int force,
2501 u64 physical_for_dev_replace)
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002502{
2503 struct scrub_block *sblock;
2504 int index;
2505
David Sterba58c4e172016-02-11 10:49:42 +01002506 sblock = kzalloc(sizeof(*sblock), GFP_KERNEL);
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002507 if (!sblock) {
Stefan Behrensd9d181c2012-11-02 09:58:09 +01002508 spin_lock(&sctx->stat_lock);
2509 sctx->stat.malloc_errors++;
2510 spin_unlock(&sctx->stat_lock);
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002511 return -ENOMEM;
2512 }
2513
Stefan Behrens7a9e9982012-11-02 14:58:04 +01002514 /* one ref inside this function, plus one for each page added to
2515 * a bio later on */
Elena Reshetova186debd2017-03-03 10:55:23 +02002516 refcount_set(&sblock->refs, 1);
Stefan Behrensd9d181c2012-11-02 09:58:09 +01002517 sblock->sctx = sctx;
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002518 sblock->no_io_error_seen = 1;
2519
2520 for (index = 0; len > 0; index++) {
Stefan Behrens7a9e9982012-11-02 14:58:04 +01002521 struct scrub_page *spage;
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002522 u64 l = min_t(u64, len, PAGE_SIZE);
2523
David Sterba58c4e172016-02-11 10:49:42 +01002524 spage = kzalloc(sizeof(*spage), GFP_KERNEL);
Stefan Behrens7a9e9982012-11-02 14:58:04 +01002525 if (!spage) {
2526leave_nomem:
Stefan Behrensd9d181c2012-11-02 09:58:09 +01002527 spin_lock(&sctx->stat_lock);
2528 sctx->stat.malloc_errors++;
2529 spin_unlock(&sctx->stat_lock);
Stefan Behrens7a9e9982012-11-02 14:58:04 +01002530 scrub_block_put(sblock);
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002531 return -ENOMEM;
2532 }
Stefan Behrens7a9e9982012-11-02 14:58:04 +01002533 BUG_ON(index >= SCRUB_MAX_PAGES_PER_BLOCK);
2534 scrub_page_get(spage);
2535 sblock->pagev[index] = spage;
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002536 spage->sblock = sblock;
Stefan Behrensa36cf8b2012-11-02 13:26:57 +01002537 spage->dev = dev;
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002538 spage->flags = flags;
2539 spage->generation = gen;
2540 spage->logical = logical;
2541 spage->physical = physical;
Stefan Behrensff023aa2012-11-06 11:43:11 +01002542 spage->physical_for_dev_replace = physical_for_dev_replace;
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002543 spage->mirror_num = mirror_num;
2544 if (csum) {
2545 spage->have_csum = 1;
Stefan Behrensd9d181c2012-11-02 09:58:09 +01002546 memcpy(spage->csum, csum, sctx->csum_size);
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002547 } else {
2548 spage->have_csum = 0;
2549 }
2550 sblock->page_count++;
David Sterba58c4e172016-02-11 10:49:42 +01002551 spage->page = alloc_page(GFP_KERNEL);
Stefan Behrens7a9e9982012-11-02 14:58:04 +01002552 if (!spage->page)
2553 goto leave_nomem;
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002554 len -= l;
2555 logical += l;
2556 physical += l;
Stefan Behrensff023aa2012-11-06 11:43:11 +01002557 physical_for_dev_replace += l;
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002558 }
2559
Stefan Behrens7a9e9982012-11-02 14:58:04 +01002560 WARN_ON(sblock->page_count == 0);
Omar Sandoval73ff61d2015-06-19 11:52:51 -07002561 if (dev->missing) {
2562 /*
2563 * This case should only be hit for RAID 5/6 device replace. See
2564 * the comment in scrub_missing_raid56_pages() for details.
2565 */
2566 scrub_missing_raid56_pages(sblock);
2567 } else {
2568 for (index = 0; index < sblock->page_count; index++) {
2569 struct scrub_page *spage = sblock->pagev[index];
2570 int ret;
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002571
Omar Sandoval73ff61d2015-06-19 11:52:51 -07002572 ret = scrub_add_page_to_rd_bio(sctx, spage);
2573 if (ret) {
2574 scrub_block_put(sblock);
2575 return ret;
2576 }
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002577 }
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002578
Omar Sandoval73ff61d2015-06-19 11:52:51 -07002579 if (force)
2580 scrub_submit(sctx);
2581 }
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002582
2583 /* last one frees, either here or in bio completion for last page */
2584 scrub_block_put(sblock);
2585 return 0;
2586}
2587
Christoph Hellwig4246a0b2015-07-20 15:29:37 +02002588static void scrub_bio_end_io(struct bio *bio)
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002589{
2590 struct scrub_bio *sbio = bio->bi_private;
Jeff Mahoneyfb456252016-06-22 18:54:56 -04002591 struct btrfs_fs_info *fs_info = sbio->dev->fs_info;
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002592
Christoph Hellwig4246a0b2015-07-20 15:29:37 +02002593 sbio->err = bio->bi_error;
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002594 sbio->bio = bio;
2595
Qu Wenruo0339ef22014-02-28 10:46:17 +08002596 btrfs_queue_work(fs_info->scrub_workers, &sbio->work);
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002597}
2598
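/*
 * Deferred from scrub_bio_end_io() into process context: mark every page
 * of a failed bio with an I/O error, complete each scrub_block whose last
 * outstanding page just finished, recycle the sbio slot and, in the
 * dev-replace case, flush queued write bios when a flush was requested.
 */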
2599static void scrub_bio_end_io_worker(struct btrfs_work *work)
2600{
2601 struct scrub_bio *sbio = container_of(work, struct scrub_bio, work);
Stefan Behrensd9d181c2012-11-02 09:58:09 +01002602 struct scrub_ctx *sctx = sbio->sctx;
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002603 int i;
2604
Stefan Behrensff023aa2012-11-06 11:43:11 +01002605 BUG_ON(sbio->page_count > SCRUB_PAGES_PER_RD_BIO);
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002606 if (sbio->err) {
2607 for (i = 0; i < sbio->page_count; i++) {
2608 struct scrub_page *spage = sbio->pagev[i];
2609
2610 spage->io_error = 1;
2611 spage->sblock->no_io_error_seen = 0;
2612 }
2613 }
2614
2615 /* now complete the scrub_block items that have all pages completed */
2616 for (i = 0; i < sbio->page_count; i++) {
2617 struct scrub_page *spage = sbio->pagev[i];
2618 struct scrub_block *sblock = spage->sblock;
2619
2620 if (atomic_dec_and_test(&sblock->outstanding_pages))
2621 scrub_block_complete(sblock);
2622 scrub_block_put(sblock);
2623 }
2624
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002625 bio_put(sbio->bio);
2626 sbio->bio = NULL;
Stefan Behrensd9d181c2012-11-02 09:58:09 +01002627 spin_lock(&sctx->list_lock);
2628 sbio->next_free = sctx->first_free;
2629 sctx->first_free = sbio->index;
2630 spin_unlock(&sctx->list_lock);
Stefan Behrensff023aa2012-11-06 11:43:11 +01002631
2632 if (sctx->is_dev_replace &&
2633 atomic_read(&sctx->wr_ctx.flush_all_writes)) {
2634 mutex_lock(&sctx->wr_ctx.wr_lock);
2635 scrub_wr_submit(sctx);
2636 mutex_unlock(&sctx->wr_ctx.wr_lock);
2637 }
2638
Stefan Behrensb6bfebc2012-11-02 16:44:58 +01002639 scrub_pending_bio_dec(sctx);
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002640}
2641
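/*
 * Mark the sectors of [start, start + len) in a per-stripe bitmap. The
 * range may wrap within the stripe: e.g. with 64K stripe_len and 4K
 * sectors (nsectors == 16), a 16K range starting at sector 14 sets bits
 * 14-15 and wraps around to also set bits 0-1.
 */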
Miao Xie5a6ac9e2014-11-06 17:20:58 +08002642static inline void __scrub_mark_bitmap(struct scrub_parity *sparity,
2643 unsigned long *bitmap,
2644 u64 start, u64 len)
2645{
Liu Bo972d7212017-04-03 13:45:33 -07002646 u64 offset;
Miao Xie5a6ac9e2014-11-06 17:20:58 +08002647 int nsectors;
Jeff Mahoneyda170662016-06-15 09:22:56 -04002648 int sectorsize = sparity->sctx->fs_info->sectorsize;
Miao Xie5a6ac9e2014-11-06 17:20:58 +08002649
2650 if (len >= sparity->stripe_len) {
2651 bitmap_set(bitmap, 0, sparity->nsectors);
2652 return;
2653 }
2654
2655 start -= sparity->logic_start;
Liu Bo972d7212017-04-03 13:45:33 -07002656 start = div64_u64_rem(start, sparity->stripe_len, &offset);
2657 offset = div_u64(offset, sectorsize);
Miao Xie5a6ac9e2014-11-06 17:20:58 +08002658 nsectors = (int)len / sectorsize;
2659
2660 if (offset + nsectors <= sparity->nsectors) {
2661 bitmap_set(bitmap, offset, nsectors);
2662 return;
2663 }
2664
2665 bitmap_set(bitmap, offset, sparity->nsectors - offset);
2666 bitmap_set(bitmap, 0, nsectors - (sparity->nsectors - offset));
2667}
2668
2669static inline void scrub_parity_mark_sectors_error(struct scrub_parity *sparity,
2670 u64 start, u64 len)
2671{
2672 __scrub_mark_bitmap(sparity, sparity->ebitmap, start, len);
2673}
2674
2675static inline void scrub_parity_mark_sectors_data(struct scrub_parity *sparity,
2676 u64 start, u64 len)
2677{
2678 __scrub_mark_bitmap(sparity, sparity->dbitmap, start, len);
2679}
2680
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002681static void scrub_block_complete(struct scrub_block *sblock)
2682{
Miao Xie5a6ac9e2014-11-06 17:20:58 +08002683 int corrupted = 0;
2684
Stefan Behrensff023aa2012-11-06 11:43:11 +01002685 if (!sblock->no_io_error_seen) {
Miao Xie5a6ac9e2014-11-06 17:20:58 +08002686 corrupted = 1;
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002687 scrub_handle_errored_block(sblock);
Stefan Behrensff023aa2012-11-06 11:43:11 +01002688 } else {
2689 /*
2690		 * if the block has a checksum error, it is written via the
2691		 * repair mechanism; otherwise, in the dev-replace case, write
2692		 * it to the replace target here.
2693 */
Miao Xie5a6ac9e2014-11-06 17:20:58 +08002694 corrupted = scrub_checksum(sblock);
2695 if (!corrupted && sblock->sctx->is_dev_replace)
Stefan Behrensff023aa2012-11-06 11:43:11 +01002696 scrub_write_block_to_dev_replace(sblock);
2697 }
Miao Xie5a6ac9e2014-11-06 17:20:58 +08002698
2699 if (sblock->sparity && corrupted && !sblock->data_corrected) {
2700 u64 start = sblock->pagev[0]->logical;
2701 u64 end = sblock->pagev[sblock->page_count - 1]->logical +
2702 PAGE_SIZE;
2703
2704 scrub_parity_mark_sectors_error(sblock->sparity,
2705 start, end - start);
2706 }
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002707}
2708
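/*
 * Look up the data checksum for @logical in sctx->csum_list. Sums that
 * end at or before @logical are stale and get dropped (counted as
 * csum_discards). Returns 1 and copies the checksum into @csum when a
 * covering sum is found, 0 otherwise; a sum is freed once its last
 * sector has been consumed.
 */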
Zhao Lei3b5753e2015-08-24 22:03:02 +08002709static int scrub_find_csum(struct scrub_ctx *sctx, u64 logical, u8 *csum)
Arne Jansena2de7332011-03-08 14:14:00 +01002710{
2711 struct btrfs_ordered_sum *sum = NULL;
Miao Xief51a4a12013-06-19 10:36:09 +08002712 unsigned long index;
Arne Jansena2de7332011-03-08 14:14:00 +01002713 unsigned long num_sectors;
Arne Jansena2de7332011-03-08 14:14:00 +01002714
Stefan Behrensd9d181c2012-11-02 09:58:09 +01002715 while (!list_empty(&sctx->csum_list)) {
2716 sum = list_first_entry(&sctx->csum_list,
Arne Jansena2de7332011-03-08 14:14:00 +01002717 struct btrfs_ordered_sum, list);
2718 if (sum->bytenr > logical)
2719 return 0;
2720 if (sum->bytenr + sum->len > logical)
2721 break;
2722
Stefan Behrensd9d181c2012-11-02 09:58:09 +01002723 ++sctx->stat.csum_discards;
Arne Jansena2de7332011-03-08 14:14:00 +01002724 list_del(&sum->list);
2725 kfree(sum);
2726 sum = NULL;
2727 }
2728 if (!sum)
2729 return 0;
2730
Miao Xief51a4a12013-06-19 10:36:09 +08002731 index = ((u32)(logical - sum->bytenr)) / sctx->sectorsize;
Stefan Behrensd9d181c2012-11-02 09:58:09 +01002732 num_sectors = sum->len / sctx->sectorsize;
Miao Xief51a4a12013-06-19 10:36:09 +08002733 memcpy(csum, sum->sums + index, sctx->csum_size);
2734 if (index == num_sectors - 1) {
Arne Jansena2de7332011-03-08 14:14:00 +01002735 list_del(&sum->list);
2736 kfree(sum);
2737 }
Miao Xief51a4a12013-06-19 10:36:09 +08002738 return 1;
Arne Jansena2de7332011-03-08 14:14:00 +01002739}
2740
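/*
 * Chop the range into blocksize pieces; for data extents look up the
 * checksum of each piece first, and in dev-replace mode copy pieces
 * without a checksum (nocow writes) via copy_nocow_pages() instead of
 * scrubbing them.
 */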
2741/* scrub extent tries to collect up to 64 kB for each bio */
Stefan Behrensd9d181c2012-11-02 09:58:09 +01002742static int scrub_extent(struct scrub_ctx *sctx, u64 logical, u64 len,
Stefan Behrensa36cf8b2012-11-02 13:26:57 +01002743 u64 physical, struct btrfs_device *dev, u64 flags,
Stefan Behrensff023aa2012-11-06 11:43:11 +01002744 u64 gen, int mirror_num, u64 physical_for_dev_replace)
Arne Jansena2de7332011-03-08 14:14:00 +01002745{
2746 int ret;
2747 u8 csum[BTRFS_CSUM_SIZE];
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002748 u32 blocksize;
2749
2750 if (flags & BTRFS_EXTENT_FLAG_DATA) {
Stefan Behrensd9d181c2012-11-02 09:58:09 +01002751 blocksize = sctx->sectorsize;
2752 spin_lock(&sctx->stat_lock);
2753 sctx->stat.data_extents_scrubbed++;
2754 sctx->stat.data_bytes_scrubbed += len;
2755 spin_unlock(&sctx->stat_lock);
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002756 } else if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
Stefan Behrensd9d181c2012-11-02 09:58:09 +01002757 blocksize = sctx->nodesize;
2758 spin_lock(&sctx->stat_lock);
2759 sctx->stat.tree_extents_scrubbed++;
2760 sctx->stat.tree_bytes_scrubbed += len;
2761 spin_unlock(&sctx->stat_lock);
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002762 } else {
Stefan Behrensd9d181c2012-11-02 09:58:09 +01002763 blocksize = sctx->sectorsize;
Stefan Behrensff023aa2012-11-06 11:43:11 +01002764 WARN_ON(1);
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002765 }
Arne Jansena2de7332011-03-08 14:14:00 +01002766
2767 while (len) {
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002768 u64 l = min_t(u64, len, blocksize);
Arne Jansena2de7332011-03-08 14:14:00 +01002769 int have_csum = 0;
2770
2771 if (flags & BTRFS_EXTENT_FLAG_DATA) {
2772 /* push csums to sbio */
Zhao Lei3b5753e2015-08-24 22:03:02 +08002773 have_csum = scrub_find_csum(sctx, logical, csum);
Arne Jansena2de7332011-03-08 14:14:00 +01002774 if (have_csum == 0)
Stefan Behrensd9d181c2012-11-02 09:58:09 +01002775 ++sctx->stat.no_csum;
Stefan Behrensff023aa2012-11-06 11:43:11 +01002776 if (sctx->is_dev_replace && !have_csum) {
2777 ret = copy_nocow_pages(sctx, logical, l,
2778 mirror_num,
2779 physical_for_dev_replace);
2780 goto behind_scrub_pages;
2781 }
Arne Jansena2de7332011-03-08 14:14:00 +01002782 }
Stefan Behrensa36cf8b2012-11-02 13:26:57 +01002783 ret = scrub_pages(sctx, logical, l, physical, dev, flags, gen,
Stefan Behrensff023aa2012-11-06 11:43:11 +01002784 mirror_num, have_csum ? csum : NULL, 0,
2785 physical_for_dev_replace);
2786behind_scrub_pages:
Arne Jansena2de7332011-03-08 14:14:00 +01002787 if (ret)
2788 return ret;
2789 len -= l;
2790 logical += l;
2791 physical += l;
Stefan Behrensff023aa2012-11-06 11:43:11 +01002792 physical_for_dev_replace += l;
Arne Jansena2de7332011-03-08 14:14:00 +01002793 }
2794 return 0;
2795}
2796
Miao Xie5a6ac9e2014-11-06 17:20:58 +08002797static int scrub_pages_for_parity(struct scrub_parity *sparity,
2798 u64 logical, u64 len,
2799 u64 physical, struct btrfs_device *dev,
2800 u64 flags, u64 gen, int mirror_num, u8 *csum)
2801{
2802 struct scrub_ctx *sctx = sparity->sctx;
2803 struct scrub_block *sblock;
2804 int index;
2805
David Sterba58c4e172016-02-11 10:49:42 +01002806 sblock = kzalloc(sizeof(*sblock), GFP_KERNEL);
Miao Xie5a6ac9e2014-11-06 17:20:58 +08002807 if (!sblock) {
2808 spin_lock(&sctx->stat_lock);
2809 sctx->stat.malloc_errors++;
2810 spin_unlock(&sctx->stat_lock);
2811 return -ENOMEM;
2812 }
2813
2814 /* one ref inside this function, plus one for each page added to
2815 * a bio later on */
Elena Reshetova186debd2017-03-03 10:55:23 +02002816 refcount_set(&sblock->refs, 1);
Miao Xie5a6ac9e2014-11-06 17:20:58 +08002817 sblock->sctx = sctx;
2818 sblock->no_io_error_seen = 1;
2819 sblock->sparity = sparity;
2820 scrub_parity_get(sparity);
2821
2822 for (index = 0; len > 0; index++) {
2823 struct scrub_page *spage;
2824 u64 l = min_t(u64, len, PAGE_SIZE);
2825
David Sterba58c4e172016-02-11 10:49:42 +01002826 spage = kzalloc(sizeof(*spage), GFP_KERNEL);
Miao Xie5a6ac9e2014-11-06 17:20:58 +08002827 if (!spage) {
2828leave_nomem:
2829 spin_lock(&sctx->stat_lock);
2830 sctx->stat.malloc_errors++;
2831 spin_unlock(&sctx->stat_lock);
2832 scrub_block_put(sblock);
2833 return -ENOMEM;
2834 }
2835 BUG_ON(index >= SCRUB_MAX_PAGES_PER_BLOCK);
2836 /* For scrub block */
2837 scrub_page_get(spage);
2838 sblock->pagev[index] = spage;
2839 /* For scrub parity */
2840 scrub_page_get(spage);
2841 list_add_tail(&spage->list, &sparity->spages);
2842 spage->sblock = sblock;
2843 spage->dev = dev;
2844 spage->flags = flags;
2845 spage->generation = gen;
2846 spage->logical = logical;
2847 spage->physical = physical;
2848 spage->mirror_num = mirror_num;
2849 if (csum) {
2850 spage->have_csum = 1;
2851 memcpy(spage->csum, csum, sctx->csum_size);
2852 } else {
2853 spage->have_csum = 0;
2854 }
2855 sblock->page_count++;
David Sterba58c4e172016-02-11 10:49:42 +01002856 spage->page = alloc_page(GFP_KERNEL);
Miao Xie5a6ac9e2014-11-06 17:20:58 +08002857 if (!spage->page)
2858 goto leave_nomem;
2859 len -= l;
2860 logical += l;
2861 physical += l;
2862 }
2863
2864 WARN_ON(sblock->page_count == 0);
2865 for (index = 0; index < sblock->page_count; index++) {
2866 struct scrub_page *spage = sblock->pagev[index];
2867 int ret;
2868
2869 ret = scrub_add_page_to_rd_bio(sctx, spage);
2870 if (ret) {
2871 scrub_block_put(sblock);
2872 return ret;
2873 }
2874 }
2875
2876 /* last one frees, either here or in bio completion for last page */
2877 scrub_block_put(sblock);
2878 return 0;
2879}
2880
2881static int scrub_extent_for_parity(struct scrub_parity *sparity,
2882 u64 logical, u64 len,
2883 u64 physical, struct btrfs_device *dev,
2884 u64 flags, u64 gen, int mirror_num)
2885{
2886 struct scrub_ctx *sctx = sparity->sctx;
2887 int ret;
2888 u8 csum[BTRFS_CSUM_SIZE];
2889 u32 blocksize;
2890
Omar Sandoval4a770892015-06-19 11:52:52 -07002891 if (dev->missing) {
2892 scrub_parity_mark_sectors_error(sparity, logical, len);
2893 return 0;
2894 }
2895
Miao Xie5a6ac9e2014-11-06 17:20:58 +08002896 if (flags & BTRFS_EXTENT_FLAG_DATA) {
2897 blocksize = sctx->sectorsize;
2898 } else if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
2899 blocksize = sctx->nodesize;
2900 } else {
2901 blocksize = sctx->sectorsize;
2902 WARN_ON(1);
2903 }
2904
2905 while (len) {
2906 u64 l = min_t(u64, len, blocksize);
2907 int have_csum = 0;
2908
2909 if (flags & BTRFS_EXTENT_FLAG_DATA) {
2910 /* push csums to sbio */
Zhao Lei3b5753e2015-08-24 22:03:02 +08002911 have_csum = scrub_find_csum(sctx, logical, csum);
Miao Xie5a6ac9e2014-11-06 17:20:58 +08002912 if (have_csum == 0)
2913 goto skip;
2914 }
2915 ret = scrub_pages_for_parity(sparity, logical, l, physical, dev,
2916 flags, gen, mirror_num,
2917 have_csum ? csum : NULL);
Miao Xie5a6ac9e2014-11-06 17:20:58 +08002918 if (ret)
2919 return ret;
Dan Carpenter6b6d24b2014-12-12 22:30:00 +03002920skip:
Miao Xie5a6ac9e2014-11-06 17:20:58 +08002921 len -= l;
2922 logical += l;
2923 physical += l;
2924 }
2925 return 0;
2926}
2927
Wang Shilong3b080b22014-04-01 18:01:43 +08002928/*
2929 * Given a physical address, this will calculate its
2930 * logical offset. If this is a parity stripe, it will return
2931 * the left-most data stripe's logical offset.
2932 *
2933 * return 0 if it is a data stripe, 1 means parity stripe.
2934 */
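/*
 * Sketch: on RAID5 over 3 devices with 64K stripes, each 64K row on one
 * device covers 128K of logical address space (nr_data_stripes == 2), so
 * a physical offset of 64K into the device extent gives
 * last_offset == 128K. The parity position then rotates by one device
 * per full stripe, which the rot/stripe_index arithmetic below replays.
 */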
2935static int get_raid56_logic_offset(u64 physical, int num,
Miao Xie5a6ac9e2014-11-06 17:20:58 +08002936 struct map_lookup *map, u64 *offset,
2937 u64 *stripe_start)
Wang Shilong3b080b22014-04-01 18:01:43 +08002938{
2939 int i;
2940 int j = 0;
2941 u64 stripe_nr;
2942 u64 last_offset;
David Sterba9d644a62015-02-20 18:42:11 +01002943 u32 stripe_index;
2944 u32 rot;
Wang Shilong3b080b22014-04-01 18:01:43 +08002945
2946 last_offset = (physical - map->stripes[num].physical) *
2947 nr_data_stripes(map);
Miao Xie5a6ac9e2014-11-06 17:20:58 +08002948 if (stripe_start)
2949 *stripe_start = last_offset;
2950
Wang Shilong3b080b22014-04-01 18:01:43 +08002951 *offset = last_offset;
2952 for (i = 0; i < nr_data_stripes(map); i++) {
2953 *offset = last_offset + i * map->stripe_len;
2954
Liu Bo42c61ab2017-04-03 13:45:24 -07002955 stripe_nr = div64_u64(*offset, map->stripe_len);
David Sterbab8b93ad2015-01-16 17:26:13 +01002956 stripe_nr = div_u64(stripe_nr, nr_data_stripes(map));
Wang Shilong3b080b22014-04-01 18:01:43 +08002957
2958 /* Work out the disk rotation on this stripe-set */
David Sterba47c57132015-02-20 18:43:47 +01002959 stripe_nr = div_u64_rem(stripe_nr, map->num_stripes, &rot);
Wang Shilong3b080b22014-04-01 18:01:43 +08002960 /* calculate which stripe this data locates */
2961 rot += i;
Wang Shilonge4fbaee2014-04-11 18:32:25 +08002962 stripe_index = rot % map->num_stripes;
Wang Shilong3b080b22014-04-01 18:01:43 +08002963 if (stripe_index == num)
2964 return 0;
2965 if (stripe_index < num)
2966 j++;
2967 }
2968 *offset = last_offset + j * map->stripe_len;
2969 return 1;
2970}
2971
Miao Xie5a6ac9e2014-11-06 17:20:58 +08002972static void scrub_free_parity(struct scrub_parity *sparity)
2973{
2974 struct scrub_ctx *sctx = sparity->sctx;
2975 struct scrub_page *curr, *next;
2976 int nbits;
2977
2978 nbits = bitmap_weight(sparity->ebitmap, sparity->nsectors);
2979 if (nbits) {
2980 spin_lock(&sctx->stat_lock);
2981 sctx->stat.read_errors += nbits;
2982 sctx->stat.uncorrectable_errors += nbits;
2983 spin_unlock(&sctx->stat_lock);
2984 }
2985
2986 list_for_each_entry_safe(curr, next, &sparity->spages, list) {
2987 list_del_init(&curr->list);
2988 scrub_page_put(curr);
2989 }
2990
2991 kfree(sparity);
2992}
2993
Zhao Lei20b2e302015-06-04 20:09:15 +08002994static void scrub_parity_bio_endio_worker(struct btrfs_work *work)
2995{
2996 struct scrub_parity *sparity = container_of(work, struct scrub_parity,
2997 work);
2998 struct scrub_ctx *sctx = sparity->sctx;
2999
3000 scrub_free_parity(sparity);
3001 scrub_pending_bio_dec(sctx);
3002}
3003
Christoph Hellwig4246a0b2015-07-20 15:29:37 +02003004static void scrub_parity_bio_endio(struct bio *bio)
Miao Xie5a6ac9e2014-11-06 17:20:58 +08003005{
3006 struct scrub_parity *sparity = (struct scrub_parity *)bio->bi_private;
Jeff Mahoney0b246af2016-06-22 18:54:23 -04003007 struct btrfs_fs_info *fs_info = sparity->sctx->fs_info;
Miao Xie5a6ac9e2014-11-06 17:20:58 +08003008
Christoph Hellwig4246a0b2015-07-20 15:29:37 +02003009 if (bio->bi_error)
Miao Xie5a6ac9e2014-11-06 17:20:58 +08003010 bitmap_or(sparity->ebitmap, sparity->ebitmap, sparity->dbitmap,
3011 sparity->nsectors);
3012
Miao Xie5a6ac9e2014-11-06 17:20:58 +08003013 bio_put(bio);
Zhao Lei20b2e302015-06-04 20:09:15 +08003014
3015 btrfs_init_work(&sparity->work, btrfs_scrubparity_helper,
3016 scrub_parity_bio_endio_worker, NULL, NULL);
Jeff Mahoney0b246af2016-06-22 18:54:23 -04003017 btrfs_queue_work(fs_info->scrub_parity_workers, &sparity->work);
Miao Xie5a6ac9e2014-11-06 17:20:58 +08003018}
3019
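/*
 * Final step of a parity scrub: clear the error sectors (ebitmap) out of
 * dbitmap and, if any checked data sectors remain, map the full stripe
 * and hand dbitmap to the raid56 layer, which recomputes and verifies
 * the parity for exactly those sectors. Errors found there are merged
 * back into ebitmap by scrub_parity_bio_endio() above.
 */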
3020static void scrub_parity_check_and_repair(struct scrub_parity *sparity)
3021{
3022 struct scrub_ctx *sctx = sparity->sctx;
Jeff Mahoney0b246af2016-06-22 18:54:23 -04003023 struct btrfs_fs_info *fs_info = sctx->fs_info;
Miao Xie5a6ac9e2014-11-06 17:20:58 +08003024 struct bio *bio;
3025 struct btrfs_raid_bio *rbio;
Miao Xie5a6ac9e2014-11-06 17:20:58 +08003026 struct btrfs_bio *bbio = NULL;
Miao Xie5a6ac9e2014-11-06 17:20:58 +08003027 u64 length;
3028 int ret;
3029
3030 if (!bitmap_andnot(sparity->dbitmap, sparity->dbitmap, sparity->ebitmap,
3031 sparity->nsectors))
3032 goto out;
3033
Zhao Leia0dd59d2015-07-21 15:42:26 +08003034 length = sparity->logic_end - sparity->logic_start;
Qu Wenruoae6529c2017-03-29 09:33:21 +08003035
3036 btrfs_bio_counter_inc_blocked(fs_info);
Jeff Mahoney0b246af2016-06-22 18:54:23 -04003037 ret = btrfs_map_sblock(fs_info, BTRFS_MAP_WRITE, sparity->logic_start,
David Sterba825ad4c2017-03-28 14:45:22 +02003038 &length, &bbio);
Zhao Lei8e5cfb52015-01-20 15:11:33 +08003039 if (ret || !bbio || !bbio->raid_map)
Miao Xie5a6ac9e2014-11-06 17:20:58 +08003040 goto bbio_out;
3041
3042 bio = btrfs_io_bio_alloc(GFP_NOFS, 0);
3043 if (!bio)
3044 goto bbio_out;
3045
3046 bio->bi_iter.bi_sector = sparity->logic_start >> 9;
3047 bio->bi_private = sparity;
3048 bio->bi_end_io = scrub_parity_bio_endio;
3049
Jeff Mahoney2ff7e612016-06-22 18:54:24 -04003050 rbio = raid56_parity_alloc_scrub_rbio(fs_info, bio, bbio,
Zhao Lei8e5cfb52015-01-20 15:11:33 +08003051 length, sparity->scrub_dev,
Miao Xie5a6ac9e2014-11-06 17:20:58 +08003052 sparity->dbitmap,
3053 sparity->nsectors);
3054 if (!rbio)
3055 goto rbio_out;
3056
Miao Xie5a6ac9e2014-11-06 17:20:58 +08003057 scrub_pending_bio_inc(sctx);
3058 raid56_parity_submit_scrub_rbio(rbio);
3059 return;
3060
3061rbio_out:
3062 bio_put(bio);
3063bbio_out:
Qu Wenruoae6529c2017-03-29 09:33:21 +08003064 btrfs_bio_counter_dec(fs_info);
Zhao Lei6e9606d2015-01-20 15:11:34 +08003065 btrfs_put_bbio(bbio);
Miao Xie5a6ac9e2014-11-06 17:20:58 +08003066 bitmap_or(sparity->ebitmap, sparity->ebitmap, sparity->dbitmap,
3067 sparity->nsectors);
3068 spin_lock(&sctx->stat_lock);
3069 sctx->stat.malloc_errors++;
3070 spin_unlock(&sctx->stat_lock);
3071out:
3072 scrub_free_parity(sparity);
3073}
3074
3075static inline int scrub_calc_parity_bitmap_len(int nsectors)
3076{
Zhao Leibfca9a62014-12-08 19:55:57 +08003077 return DIV_ROUND_UP(nsectors, BITS_PER_LONG) * sizeof(long);
Miao Xie5a6ac9e2014-11-06 17:20:58 +08003078}
3079
3080static void scrub_parity_get(struct scrub_parity *sparity)
3081{
Elena Reshetova78a76452017-03-03 10:55:24 +02003082 refcount_inc(&sparity->refs);
Miao Xie5a6ac9e2014-11-06 17:20:58 +08003083}
3084
3085static void scrub_parity_put(struct scrub_parity *sparity)
3086{
Elena Reshetova78a76452017-03-03 10:55:24 +02003087 if (!refcount_dec_and_test(&sparity->refs))
Miao Xie5a6ac9e2014-11-06 17:20:58 +08003088 return;
3089
3090 scrub_parity_check_and_repair(sparity);
3091}
3092
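/*
 * Scrub the data stripes of one RAID5/6 range [logic_start, logic_end):
 * walk the extent tree, mark the covered sectors in dbitmap, read and
 * verify them, and let the final scrub_parity_put() trigger the parity
 * check and repair.
 */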
3093static noinline_for_stack int scrub_raid56_parity(struct scrub_ctx *sctx,
3094 struct map_lookup *map,
3095 struct btrfs_device *sdev,
3096 struct btrfs_path *path,
3097 u64 logic_start,
3098 u64 logic_end)
3099{
Jeff Mahoneyfb456252016-06-22 18:54:56 -04003100 struct btrfs_fs_info *fs_info = sctx->fs_info;
Miao Xie5a6ac9e2014-11-06 17:20:58 +08003101 struct btrfs_root *root = fs_info->extent_root;
3102 struct btrfs_root *csum_root = fs_info->csum_root;
3103 struct btrfs_extent_item *extent;
Omar Sandoval4a770892015-06-19 11:52:52 -07003104 struct btrfs_bio *bbio = NULL;
Miao Xie5a6ac9e2014-11-06 17:20:58 +08003105 u64 flags;
3106 int ret;
3107 int slot;
3108 struct extent_buffer *l;
3109 struct btrfs_key key;
3110 u64 generation;
3111 u64 extent_logical;
3112 u64 extent_physical;
3113 u64 extent_len;
Omar Sandoval4a770892015-06-19 11:52:52 -07003114 u64 mapped_length;
Miao Xie5a6ac9e2014-11-06 17:20:58 +08003115 struct btrfs_device *extent_dev;
3116 struct scrub_parity *sparity;
3117 int nsectors;
3118 int bitmap_len;
3119 int extent_mirror_num;
3120 int stop_loop = 0;
3121
Jeff Mahoney0b246af2016-06-22 18:54:23 -04003122 nsectors = div_u64(map->stripe_len, fs_info->sectorsize);
Miao Xie5a6ac9e2014-11-06 17:20:58 +08003123 bitmap_len = scrub_calc_parity_bitmap_len(nsectors);
3124 sparity = kzalloc(sizeof(struct scrub_parity) + 2 * bitmap_len,
3125 GFP_NOFS);
3126 if (!sparity) {
3127 spin_lock(&sctx->stat_lock);
3128 sctx->stat.malloc_errors++;
3129 spin_unlock(&sctx->stat_lock);
3130 return -ENOMEM;
3131 }
3132
3133 sparity->stripe_len = map->stripe_len;
3134 sparity->nsectors = nsectors;
3135 sparity->sctx = sctx;
3136 sparity->scrub_dev = sdev;
3137 sparity->logic_start = logic_start;
3138 sparity->logic_end = logic_end;
Elena Reshetova78a76452017-03-03 10:55:24 +02003139 refcount_set(&sparity->refs, 1);
Miao Xie5a6ac9e2014-11-06 17:20:58 +08003140 INIT_LIST_HEAD(&sparity->spages);
3141 sparity->dbitmap = sparity->bitmap;
3142 sparity->ebitmap = (void *)sparity->bitmap + bitmap_len;
3143
3144 ret = 0;
3145 while (logic_start < logic_end) {
3146 if (btrfs_fs_incompat(fs_info, SKINNY_METADATA))
3147 key.type = BTRFS_METADATA_ITEM_KEY;
3148 else
3149 key.type = BTRFS_EXTENT_ITEM_KEY;
3150 key.objectid = logic_start;
3151 key.offset = (u64)-1;
3152
3153 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
3154 if (ret < 0)
3155 goto out;
3156
3157 if (ret > 0) {
3158 ret = btrfs_previous_extent_item(root, path, 0);
3159 if (ret < 0)
3160 goto out;
3161 if (ret > 0) {
3162 btrfs_release_path(path);
3163 ret = btrfs_search_slot(NULL, root, &key,
3164 path, 0, 0);
3165 if (ret < 0)
3166 goto out;
3167 }
3168 }
3169
3170 stop_loop = 0;
3171 while (1) {
3172 u64 bytes;
3173
3174 l = path->nodes[0];
3175 slot = path->slots[0];
3176 if (slot >= btrfs_header_nritems(l)) {
3177 ret = btrfs_next_leaf(root, path);
3178 if (ret == 0)
3179 continue;
3180 if (ret < 0)
3181 goto out;
3182
3183 stop_loop = 1;
3184 break;
3185 }
3186 btrfs_item_key_to_cpu(l, &key, slot);
3187
Zhao Leid7cad232015-07-22 13:14:48 +08003188 if (key.type != BTRFS_EXTENT_ITEM_KEY &&
3189 key.type != BTRFS_METADATA_ITEM_KEY)
3190 goto next;
3191
Miao Xie5a6ac9e2014-11-06 17:20:58 +08003192 if (key.type == BTRFS_METADATA_ITEM_KEY)
Jeff Mahoney0b246af2016-06-22 18:54:23 -04003193 bytes = fs_info->nodesize;
Miao Xie5a6ac9e2014-11-06 17:20:58 +08003194 else
3195 bytes = key.offset;
3196
3197 if (key.objectid + bytes <= logic_start)
3198 goto next;
3199
Zhao Leia0dd59d2015-07-21 15:42:26 +08003200 if (key.objectid >= logic_end) {
Miao Xie5a6ac9e2014-11-06 17:20:58 +08003201 stop_loop = 1;
3202 break;
3203 }
3204
3205 while (key.objectid >= logic_start + map->stripe_len)
3206 logic_start += map->stripe_len;
3207
3208 extent = btrfs_item_ptr(l, slot,
3209 struct btrfs_extent_item);
3210 flags = btrfs_extent_flags(l, extent);
3211 generation = btrfs_extent_generation(l, extent);
3212
Zhao Leia323e812015-07-23 12:29:49 +08003213 if ((flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) &&
3214 (key.objectid < logic_start ||
3215 key.objectid + bytes >
3216 logic_start + map->stripe_len)) {
Jeff Mahoney5d163e02016-09-20 10:05:00 -04003217 btrfs_err(fs_info,
3218 "scrub: tree block %llu spanning stripes, ignored. logical=%llu",
Zhao Leia323e812015-07-23 12:29:49 +08003219 key.objectid, logic_start);
Zhao Lei9799d2c32015-08-25 21:31:40 +08003220 spin_lock(&sctx->stat_lock);
3221 sctx->stat.uncorrectable_errors++;
3222 spin_unlock(&sctx->stat_lock);
Miao Xie5a6ac9e2014-11-06 17:20:58 +08003223 goto next;
3224 }
3225again:
3226 extent_logical = key.objectid;
3227 extent_len = bytes;
3228
3229 if (extent_logical < logic_start) {
3230 extent_len -= logic_start - extent_logical;
3231 extent_logical = logic_start;
3232 }
3233
3234 if (extent_logical + extent_len >
3235 logic_start + map->stripe_len)
3236 extent_len = logic_start + map->stripe_len -
3237 extent_logical;
3238
3239 scrub_parity_mark_sectors_data(sparity, extent_logical,
3240 extent_len);
3241
Omar Sandoval4a770892015-06-19 11:52:52 -07003242 mapped_length = extent_len;
Zhao Leif1fee652016-05-17 17:37:38 +08003243 bbio = NULL;
Christoph Hellwigcf8cddd2016-10-27 09:27:36 +02003244 ret = btrfs_map_block(fs_info, BTRFS_MAP_READ,
3245 extent_logical, &mapped_length, &bbio,
3246 0);
Omar Sandoval4a770892015-06-19 11:52:52 -07003247 if (!ret) {
3248 if (!bbio || mapped_length < extent_len)
3249 ret = -EIO;
3250 }
3251 if (ret) {
3252 btrfs_put_bbio(bbio);
3253 goto out;
3254 }
3255 extent_physical = bbio->stripes[0].physical;
3256 extent_mirror_num = bbio->mirror_num;
3257 extent_dev = bbio->stripes[0].dev;
3258 btrfs_put_bbio(bbio);
Miao Xie5a6ac9e2014-11-06 17:20:58 +08003259
3260 ret = btrfs_lookup_csums_range(csum_root,
3261 extent_logical,
3262 extent_logical + extent_len - 1,
3263 &sctx->csum_list, 1);
3264 if (ret)
3265 goto out;
3266
3267 ret = scrub_extent_for_parity(sparity, extent_logical,
3268 extent_len,
3269 extent_physical,
3270 extent_dev, flags,
3271 generation,
3272 extent_mirror_num);
Zhao Lei6fa96d72015-07-21 12:22:30 +08003273
3274 scrub_free_csums(sctx);
3275
Miao Xie5a6ac9e2014-11-06 17:20:58 +08003276 if (ret)
3277 goto out;
3278
Miao Xie5a6ac9e2014-11-06 17:20:58 +08003279 if (extent_logical + extent_len <
3280 key.objectid + bytes) {
3281 logic_start += map->stripe_len;
3282
3283 if (logic_start >= logic_end) {
3284 stop_loop = 1;
3285 break;
3286 }
3287
3288 if (logic_start < key.objectid + bytes) {
3289 cond_resched();
3290 goto again;
3291 }
3292 }
3293next:
3294 path->slots[0]++;
3295 }
3296
3297 btrfs_release_path(path);
3298
3299 if (stop_loop)
3300 break;
3301
3302 logic_start += map->stripe_len;
3303 }
3304out:
3305 if (ret < 0)
3306 scrub_parity_mark_sectors_error(sparity, logic_start,
Zhao Leia0dd59d2015-07-21 15:42:26 +08003307 logic_end - logic_start);
Miao Xie5a6ac9e2014-11-06 17:20:58 +08003308 scrub_parity_put(sparity);
3309 scrub_submit(sctx);
3310 mutex_lock(&sctx->wr_ctx.wr_lock);
3311 scrub_wr_submit(sctx);
3312 mutex_unlock(&sctx->wr_ctx.wr_lock);
3313
3314 btrfs_release_path(path);
3315 return ret < 0 ? ret : 0;
3316}
3317
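/*
 * Scrub stripe @num of one device extent of @scrub_dev. The block group
 * profile determines the physical-to-logical mapping: the offset of this
 * device's first stripe within the chunk, the logical distance between
 * two consecutive stripes on this device (increment) and the mirror
 * number to report for repairs.
 */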
Stefan Behrensd9d181c2012-11-02 09:58:09 +01003318static noinline_for_stack int scrub_stripe(struct scrub_ctx *sctx,
Stefan Behrensa36cf8b2012-11-02 13:26:57 +01003319 struct map_lookup *map,
3320 struct btrfs_device *scrub_dev,
Stefan Behrensff023aa2012-11-06 11:43:11 +01003321 int num, u64 base, u64 length,
3322 int is_dev_replace)
Arne Jansena2de7332011-03-08 14:14:00 +01003323{
Miao Xie5a6ac9e2014-11-06 17:20:58 +08003324 struct btrfs_path *path, *ppath;
Jeff Mahoneyfb456252016-06-22 18:54:56 -04003325 struct btrfs_fs_info *fs_info = sctx->fs_info;
Arne Jansena2de7332011-03-08 14:14:00 +01003326 struct btrfs_root *root = fs_info->extent_root;
3327 struct btrfs_root *csum_root = fs_info->csum_root;
3328 struct btrfs_extent_item *extent;
Arne Jansene7786c32011-05-28 20:58:38 +00003329 struct blk_plug plug;
Arne Jansena2de7332011-03-08 14:14:00 +01003330 u64 flags;
3331 int ret;
3332 int slot;
Arne Jansena2de7332011-03-08 14:14:00 +01003333 u64 nstripes;
Arne Jansena2de7332011-03-08 14:14:00 +01003334 struct extent_buffer *l;
Arne Jansena2de7332011-03-08 14:14:00 +01003335 u64 physical;
3336 u64 logical;
Liu Bo625f1c8d2013-04-27 02:56:57 +00003337 u64 logic_end;
Wang Shilong3b080b22014-04-01 18:01:43 +08003338 u64 physical_end;
Arne Jansena2de7332011-03-08 14:14:00 +01003339 u64 generation;
Jan Schmidte12fa9c2011-06-17 15:55:21 +02003340 int mirror_num;
Arne Jansen7a262852011-06-10 12:39:23 +02003341 struct reada_control *reada1;
3342 struct reada_control *reada2;
David Sterbae6c11f92016-03-24 18:00:53 +01003343 struct btrfs_key key;
Arne Jansen7a262852011-06-10 12:39:23 +02003344 struct btrfs_key key_end;
Arne Jansena2de7332011-03-08 14:14:00 +01003345 u64 increment = map->stripe_len;
3346 u64 offset;
Stefan Behrensff023aa2012-11-06 11:43:11 +01003347 u64 extent_logical;
3348 u64 extent_physical;
3349 u64 extent_len;
Miao Xie5a6ac9e2014-11-06 17:20:58 +08003350 u64 stripe_logical;
3351 u64 stripe_end;
Stefan Behrensff023aa2012-11-06 11:43:11 +01003352 struct btrfs_device *extent_dev;
3353 int extent_mirror_num;
Wang Shilong3b080b22014-04-01 18:01:43 +08003354 int stop_loop = 0;
David Woodhouse53b381b2013-01-29 18:40:14 -05003355
Wang Shilong3b080b22014-04-01 18:01:43 +08003356 physical = map->stripes[num].physical;
Arne Jansena2de7332011-03-08 14:14:00 +01003357 offset = 0;
Liu Bo42c61ab2017-04-03 13:45:24 -07003358 nstripes = div64_u64(length, map->stripe_len);
Arne Jansena2de7332011-03-08 14:14:00 +01003359 if (map->type & BTRFS_BLOCK_GROUP_RAID0) {
3360 offset = map->stripe_len * num;
3361 increment = map->stripe_len * map->num_stripes;
Jan Schmidt193ea742011-06-13 19:56:54 +02003362 mirror_num = 1;
Arne Jansena2de7332011-03-08 14:14:00 +01003363 } else if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
3364 int factor = map->num_stripes / map->sub_stripes;
3365 offset = map->stripe_len * (num / map->sub_stripes);
3366 increment = map->stripe_len * factor;
Jan Schmidt193ea742011-06-13 19:56:54 +02003367 mirror_num = num % map->sub_stripes + 1;
Arne Jansena2de7332011-03-08 14:14:00 +01003368 } else if (map->type & BTRFS_BLOCK_GROUP_RAID1) {
3369 increment = map->stripe_len;
Jan Schmidt193ea742011-06-13 19:56:54 +02003370 mirror_num = num % map->num_stripes + 1;
Arne Jansena2de7332011-03-08 14:14:00 +01003371 } else if (map->type & BTRFS_BLOCK_GROUP_DUP) {
3372 increment = map->stripe_len;
Jan Schmidt193ea742011-06-13 19:56:54 +02003373 mirror_num = num % map->num_stripes + 1;
Zhao Leiffe2d202015-01-20 15:11:44 +08003374 } else if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
Miao Xie5a6ac9e2014-11-06 17:20:58 +08003375 get_raid56_logic_offset(physical, num, map, &offset, NULL);
Wang Shilong3b080b22014-04-01 18:01:43 +08003376 increment = map->stripe_len * nr_data_stripes(map);
3377 mirror_num = 1;
Arne Jansena2de7332011-03-08 14:14:00 +01003378 } else {
3379 increment = map->stripe_len;
Jan Schmidt193ea742011-06-13 19:56:54 +02003380 mirror_num = 1;
Arne Jansena2de7332011-03-08 14:14:00 +01003381 }
3382
3383 path = btrfs_alloc_path();
3384 if (!path)
3385 return -ENOMEM;
3386
Miao Xie5a6ac9e2014-11-06 17:20:58 +08003387 ppath = btrfs_alloc_path();
3388 if (!ppath) {
Tsutomu Itoh379d6852015-01-09 17:37:52 +09003389 btrfs_free_path(path);
Miao Xie5a6ac9e2014-11-06 17:20:58 +08003390 return -ENOMEM;
3391 }
3392
Stefan Behrensb5d67f62012-03-27 14:21:27 -04003393 /*
3394	 * work on the commit root. The related disk blocks are static as
3395	 * long as COW is applied. This means it is safe to rewrite
3396	 * them to repair disk errors without any race conditions
3397 */
Arne Jansena2de7332011-03-08 14:14:00 +01003398 path->search_commit_root = 1;
3399 path->skip_locking = 1;
3400
Gui Hecheng063c54d2015-01-09 09:39:40 +08003401 ppath->search_commit_root = 1;
3402 ppath->skip_locking = 1;
Arne Jansena2de7332011-03-08 14:14:00 +01003403 /*
Arne Jansen7a262852011-06-10 12:39:23 +02003404	 * trigger readahead for the extent tree and the csum tree, and wait
3405	 * for completion. During readahead, the scrub is officially paused
3406	 * so as not to hold off transaction commits
Arne Jansena2de7332011-03-08 14:14:00 +01003407 */
3408 logical = base + offset;
Wang Shilong3b080b22014-04-01 18:01:43 +08003409 physical_end = physical + nstripes * map->stripe_len;
Zhao Leiffe2d202015-01-20 15:11:44 +08003410 if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
Wang Shilong3b080b22014-04-01 18:01:43 +08003411 get_raid56_logic_offset(physical_end, num,
Miao Xie5a6ac9e2014-11-06 17:20:58 +08003412 map, &logic_end, NULL);
Wang Shilong3b080b22014-04-01 18:01:43 +08003413 logic_end += base;
3414 } else {
3415 logic_end = logical + increment * nstripes;
3416 }
Stefan Behrensd9d181c2012-11-02 09:58:09 +01003417 wait_event(sctx->list_wait,
Stefan Behrensb6bfebc2012-11-02 16:44:58 +01003418 atomic_read(&sctx->bios_in_flight) == 0);
Wang Shilongcb7ab022013-12-04 21:16:53 +08003419 scrub_blocked_if_needed(fs_info);
Arne Jansena2de7332011-03-08 14:14:00 +01003420
Arne Jansen7a262852011-06-10 12:39:23 +02003421 /* FIXME it might be better to start readahead at commit root */
David Sterbae6c11f92016-03-24 18:00:53 +01003422 key.objectid = logical;
3423 key.type = BTRFS_EXTENT_ITEM_KEY;
3424 key.offset = (u64)0;
Wang Shilong3b080b22014-04-01 18:01:43 +08003425 key_end.objectid = logic_end;
Josef Bacik3173a182013-03-07 14:22:04 -05003426 key_end.type = BTRFS_METADATA_ITEM_KEY;
3427 key_end.offset = (u64)-1;
David Sterbae6c11f92016-03-24 18:00:53 +01003428 reada1 = btrfs_reada_add(root, &key, &key_end);
Arne Jansena2de7332011-03-08 14:14:00 +01003429
David Sterbae6c11f92016-03-24 18:00:53 +01003430 key.objectid = BTRFS_EXTENT_CSUM_OBJECTID;
3431 key.type = BTRFS_EXTENT_CSUM_KEY;
3432 key.offset = logical;
Arne Jansen7a262852011-06-10 12:39:23 +02003433 key_end.objectid = BTRFS_EXTENT_CSUM_OBJECTID;
3434 key_end.type = BTRFS_EXTENT_CSUM_KEY;
Wang Shilong3b080b22014-04-01 18:01:43 +08003435 key_end.offset = logic_end;
David Sterbae6c11f92016-03-24 18:00:53 +01003436 reada2 = btrfs_reada_add(csum_root, &key, &key_end);
Arne Jansena2de7332011-03-08 14:14:00 +01003437
Arne Jansen7a262852011-06-10 12:39:23 +02003438 if (!IS_ERR(reada1))
3439 btrfs_reada_wait(reada1);
3440 if (!IS_ERR(reada2))
3441 btrfs_reada_wait(reada2);
Arne Jansena2de7332011-03-08 14:14:00 +01003442
Arne Jansena2de7332011-03-08 14:14:00 +01003443
3444 /*
3445 * collect all data csums for the stripe to avoid seeking during
3446	 * the scrub. This might currently (crc32) end up being about 1MB
3447 */
Arne Jansene7786c32011-05-28 20:58:38 +00003448 blk_start_plug(&plug);
Arne Jansena2de7332011-03-08 14:14:00 +01003449
Arne Jansena2de7332011-03-08 14:14:00 +01003450 /*
3451 * now find all extents for each stripe and scrub them
3452 */
Arne Jansena2de7332011-03-08 14:14:00 +01003453 ret = 0;
Wang Shilong3b080b22014-04-01 18:01:43 +08003454 while (physical < physical_end) {
Arne Jansena2de7332011-03-08 14:14:00 +01003455 /*
3456 * canceled?
3457 */
3458 if (atomic_read(&fs_info->scrub_cancel_req) ||
Stefan Behrensd9d181c2012-11-02 09:58:09 +01003459 atomic_read(&sctx->cancel_req)) {
Arne Jansena2de7332011-03-08 14:14:00 +01003460 ret = -ECANCELED;
3461 goto out;
3462 }
3463 /*
3464 * check to see if we have to pause
3465 */
3466 if (atomic_read(&fs_info->scrub_pause_req)) {
3467 /* push queued extents */
Stefan Behrensff023aa2012-11-06 11:43:11 +01003468 atomic_set(&sctx->wr_ctx.flush_all_writes, 1);
Stefan Behrensd9d181c2012-11-02 09:58:09 +01003469 scrub_submit(sctx);
Stefan Behrensff023aa2012-11-06 11:43:11 +01003470 mutex_lock(&sctx->wr_ctx.wr_lock);
3471 scrub_wr_submit(sctx);
3472 mutex_unlock(&sctx->wr_ctx.wr_lock);
Stefan Behrensd9d181c2012-11-02 09:58:09 +01003473 wait_event(sctx->list_wait,
Stefan Behrensb6bfebc2012-11-02 16:44:58 +01003474 atomic_read(&sctx->bios_in_flight) == 0);
Stefan Behrensff023aa2012-11-06 11:43:11 +01003475 atomic_set(&sctx->wr_ctx.flush_all_writes, 0);
Wang Shilong3cb09292013-12-04 21:15:19 +08003476 scrub_blocked_if_needed(fs_info);
Arne Jansena2de7332011-03-08 14:14:00 +01003477 }
3478
Zhao Leif2f66a22015-07-21 12:22:29 +08003479 if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
3480 ret = get_raid56_logic_offset(physical, num, map,
3481 &logical,
3482 &stripe_logical);
3483 logical += base;
3484 if (ret) {
Zhao Lei79553232015-08-18 17:54:30 +08003485				/* it is a parity stripe */
Zhao Leif2f66a22015-07-21 12:22:29 +08003486 stripe_logical += base;
Zhao Leia0dd59d2015-07-21 15:42:26 +08003487 stripe_end = stripe_logical + increment;
Zhao Leif2f66a22015-07-21 12:22:29 +08003488 ret = scrub_raid56_parity(sctx, map, scrub_dev,
3489 ppath, stripe_logical,
3490 stripe_end);
3491 if (ret)
3492 goto out;
3493 goto skip;
3494 }
3495 }
3496
Wang Shilong7c76edb2014-01-12 21:38:32 +08003497 if (btrfs_fs_incompat(fs_info, SKINNY_METADATA))
3498 key.type = BTRFS_METADATA_ITEM_KEY;
3499 else
3500 key.type = BTRFS_EXTENT_ITEM_KEY;
Arne Jansena2de7332011-03-08 14:14:00 +01003501 key.objectid = logical;
Liu Bo625f1c8d2013-04-27 02:56:57 +00003502 key.offset = (u64)-1;
Arne Jansena2de7332011-03-08 14:14:00 +01003503
3504 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
3505 if (ret < 0)
3506 goto out;
Josef Bacik3173a182013-03-07 14:22:04 -05003507
Arne Jansen8c510322011-06-03 10:09:26 +02003508 if (ret > 0) {
Wang Shilongade2e0b2014-01-12 21:38:33 +08003509 ret = btrfs_previous_extent_item(root, path, 0);
Arne Jansena2de7332011-03-08 14:14:00 +01003510 if (ret < 0)
3511 goto out;
Arne Jansen8c510322011-06-03 10:09:26 +02003512 if (ret > 0) {
3513 /* there's no smaller item, so stick with the
3514 * larger one */
3515 btrfs_release_path(path);
3516 ret = btrfs_search_slot(NULL, root, &key,
3517 path, 0, 0);
3518 if (ret < 0)
3519 goto out;
3520 }
Arne Jansena2de7332011-03-08 14:14:00 +01003521 }
3522
Liu Bo625f1c8d2013-04-27 02:56:57 +00003523 stop_loop = 0;
Arne Jansena2de7332011-03-08 14:14:00 +01003524 while (1) {
Josef Bacik3173a182013-03-07 14:22:04 -05003525 u64 bytes;
3526
Arne Jansena2de7332011-03-08 14:14:00 +01003527 l = path->nodes[0];
3528 slot = path->slots[0];
3529 if (slot >= btrfs_header_nritems(l)) {
3530 ret = btrfs_next_leaf(root, path);
3531 if (ret == 0)
3532 continue;
3533 if (ret < 0)
3534 goto out;
3535
Liu Bo625f1c8d2013-04-27 02:56:57 +00003536 stop_loop = 1;
Arne Jansena2de7332011-03-08 14:14:00 +01003537 break;
3538 }
3539 btrfs_item_key_to_cpu(l, &key, slot);
3540
Zhao Leid7cad232015-07-22 13:14:48 +08003541 if (key.type != BTRFS_EXTENT_ITEM_KEY &&
3542 key.type != BTRFS_METADATA_ITEM_KEY)
3543 goto next;
3544
Josef Bacik3173a182013-03-07 14:22:04 -05003545 if (key.type == BTRFS_METADATA_ITEM_KEY)
Jeff Mahoney0b246af2016-06-22 18:54:23 -04003546 bytes = fs_info->nodesize;
Josef Bacik3173a182013-03-07 14:22:04 -05003547 else
3548 bytes = key.offset;
3549
3550 if (key.objectid + bytes <= logical)
Arne Jansena2de7332011-03-08 14:14:00 +01003551 goto next;
3552
Liu Bo625f1c8d2013-04-27 02:56:57 +00003553 if (key.objectid >= logical + map->stripe_len) {
3554 /* out of this device extent */
3555 if (key.objectid >= logic_end)
3556 stop_loop = 1;
3557 break;
3558 }
Arne Jansena2de7332011-03-08 14:14:00 +01003559
3560 extent = btrfs_item_ptr(l, slot,
3561 struct btrfs_extent_item);
3562 flags = btrfs_extent_flags(l, extent);
3563 generation = btrfs_extent_generation(l, extent);
3564
Zhao Leia323e812015-07-23 12:29:49 +08003565 if ((flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) &&
3566 (key.objectid < logical ||
3567 key.objectid + bytes >
3568 logical + map->stripe_len)) {
Frank Holtonefe120a2013-12-20 11:37:06 -05003569 btrfs_err(fs_info,
Jeff Mahoney5d163e02016-09-20 10:05:00 -04003570 "scrub: tree block %llu spanning stripes, ignored. logical=%llu",
Geert Uytterhoevenc1c9ff72013-08-20 13:20:07 +02003571 key.objectid, logical);
Zhao Lei9799d2c32015-08-25 21:31:40 +08003572 spin_lock(&sctx->stat_lock);
3573 sctx->stat.uncorrectable_errors++;
3574 spin_unlock(&sctx->stat_lock);
Arne Jansena2de7332011-03-08 14:14:00 +01003575 goto next;
3576 }
3577
Liu Bo625f1c8d2013-04-27 02:56:57 +00003578again:
3579 extent_logical = key.objectid;
3580 extent_len = bytes;
3581
Arne Jansena2de7332011-03-08 14:14:00 +01003582 /*
3583 * trim extent to this stripe
3584 */
Liu Bo625f1c8d2013-04-27 02:56:57 +00003585 if (extent_logical < logical) {
3586 extent_len -= logical - extent_logical;
3587 extent_logical = logical;
Arne Jansena2de7332011-03-08 14:14:00 +01003588 }
Liu Bo625f1c8d2013-04-27 02:56:57 +00003589 if (extent_logical + extent_len >
Arne Jansena2de7332011-03-08 14:14:00 +01003590 logical + map->stripe_len) {
Liu Bo625f1c8d2013-04-27 02:56:57 +00003591 extent_len = logical + map->stripe_len -
3592 extent_logical;
Arne Jansena2de7332011-03-08 14:14:00 +01003593 }
3594
Liu Bo625f1c8d2013-04-27 02:56:57 +00003595 extent_physical = extent_logical - logical + physical;
Stefan Behrensff023aa2012-11-06 11:43:11 +01003596 extent_dev = scrub_dev;
3597 extent_mirror_num = mirror_num;
3598 if (is_dev_replace)
3599 scrub_remap_extent(fs_info, extent_logical,
3600 extent_len, &extent_physical,
3601 &extent_dev,
3602 &extent_mirror_num);
Liu Bo625f1c8d2013-04-27 02:56:57 +00003603
Zhao Leife8cf652015-07-22 13:14:47 +08003604 ret = btrfs_lookup_csums_range(csum_root,
3605 extent_logical,
3606 extent_logical +
3607 extent_len - 1,
3608 &sctx->csum_list, 1);
Arne Jansena2de7332011-03-08 14:14:00 +01003609 if (ret)
3610 goto out;
3611
Liu Bo625f1c8d2013-04-27 02:56:57 +00003612 ret = scrub_extent(sctx, extent_logical, extent_len,
3613 extent_physical, extent_dev, flags,
3614 generation, extent_mirror_num,
Stefan Behrens115930c2013-07-04 16:14:23 +02003615 extent_logical - logical + physical);
Zhao Lei6fa96d72015-07-21 12:22:30 +08003616
3617 scrub_free_csums(sctx);
3618
Liu Bo625f1c8d2013-04-27 02:56:57 +00003619 if (ret)
3620 goto out;
3621
3622 if (extent_logical + extent_len <
3623 key.objectid + bytes) {
Zhao Leiffe2d202015-01-20 15:11:44 +08003624 if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
Wang Shilong3b080b22014-04-01 18:01:43 +08003625 /*
3626					 * loop until we find the next data stripe
3627					 * or we have finished all stripes.
3628 */
Miao Xie5a6ac9e2014-11-06 17:20:58 +08003629loop:
3630 physical += map->stripe_len;
3631 ret = get_raid56_logic_offset(physical,
3632 num, map, &logical,
3633 &stripe_logical);
3634 logical += base;
3635
3636 if (ret && physical < physical_end) {
3637 stripe_logical += base;
3638 stripe_end = stripe_logical +
Zhao Leia0dd59d2015-07-21 15:42:26 +08003639 increment;
Miao Xie5a6ac9e2014-11-06 17:20:58 +08003640 ret = scrub_raid56_parity(sctx,
3641 map, scrub_dev, ppath,
3642 stripe_logical,
3643 stripe_end);
3644 if (ret)
3645 goto out;
3646 goto loop;
3647 }
Wang Shilong3b080b22014-04-01 18:01:43 +08003648 } else {
3649 physical += map->stripe_len;
3650 logical += increment;
3651 }
Liu Bo625f1c8d2013-04-27 02:56:57 +00003652 if (logical < key.objectid + bytes) {
3653 cond_resched();
3654 goto again;
3655 }
3656
Wang Shilong3b080b22014-04-01 18:01:43 +08003657 if (physical >= physical_end) {
Liu Bo625f1c8d2013-04-27 02:56:57 +00003658 stop_loop = 1;
3659 break;
3660 }
3661 }
Arne Jansena2de7332011-03-08 14:14:00 +01003662next:
3663 path->slots[0]++;
3664 }
Chris Mason71267332011-05-23 06:30:52 -04003665 btrfs_release_path(path);
Wang Shilong3b080b22014-04-01 18:01:43 +08003666skip:
Arne Jansena2de7332011-03-08 14:14:00 +01003667 logical += increment;
3668 physical += map->stripe_len;
Stefan Behrensd9d181c2012-11-02 09:58:09 +01003669 spin_lock(&sctx->stat_lock);
Liu Bo625f1c8d2013-04-27 02:56:57 +00003670 if (stop_loop)
3671 sctx->stat.last_physical = map->stripes[num].physical +
3672 length;
3673 else
3674 sctx->stat.last_physical = physical;
Stefan Behrensd9d181c2012-11-02 09:58:09 +01003675 spin_unlock(&sctx->stat_lock);
Liu Bo625f1c8d2013-04-27 02:56:57 +00003676 if (stop_loop)
3677 break;
Arne Jansena2de7332011-03-08 14:14:00 +01003678 }
Stefan Behrensff023aa2012-11-06 11:43:11 +01003679out:
Arne Jansena2de7332011-03-08 14:14:00 +01003680 /* push queued extents */
Stefan Behrensd9d181c2012-11-02 09:58:09 +01003681 scrub_submit(sctx);
Stefan Behrensff023aa2012-11-06 11:43:11 +01003682 mutex_lock(&sctx->wr_ctx.wr_lock);
3683 scrub_wr_submit(sctx);
3684 mutex_unlock(&sctx->wr_ctx.wr_lock);
Arne Jansena2de7332011-03-08 14:14:00 +01003685
Arne Jansene7786c32011-05-28 20:58:38 +00003686 blk_finish_plug(&plug);
Arne Jansena2de7332011-03-08 14:14:00 +01003687 btrfs_free_path(path);
Miao Xie5a6ac9e2014-11-06 17:20:58 +08003688 btrfs_free_path(ppath);
Arne Jansena2de7332011-03-08 14:14:00 +01003689 return ret < 0 ? ret : 0;
3690}
3691
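/*
 * Look up the mapping of the chunk at @chunk_offset and scrub every
 * stripe of it that lives on @scrub_dev at @dev_offset. A missing
 * extent map is tolerated only if the block group has already been
 * removed.
 */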
Stefan Behrensd9d181c2012-11-02 09:58:09 +01003692static noinline_for_stack int scrub_chunk(struct scrub_ctx *sctx,
Stefan Behrensa36cf8b2012-11-02 13:26:57 +01003693 struct btrfs_device *scrub_dev,
Stefan Behrensa36cf8b2012-11-02 13:26:57 +01003694 u64 chunk_offset, u64 length,
Filipe Manana020d5b72015-11-19 10:57:20 +00003695 u64 dev_offset,
3696 struct btrfs_block_group_cache *cache,
3697 int is_dev_replace)
Arne Jansena2de7332011-03-08 14:14:00 +01003698{
Jeff Mahoneyfb456252016-06-22 18:54:56 -04003699 struct btrfs_fs_info *fs_info = sctx->fs_info;
3700 struct btrfs_mapping_tree *map_tree = &fs_info->mapping_tree;
Arne Jansena2de7332011-03-08 14:14:00 +01003701 struct map_lookup *map;
3702 struct extent_map *em;
3703 int i;
Stefan Behrensff023aa2012-11-06 11:43:11 +01003704 int ret = 0;
Arne Jansena2de7332011-03-08 14:14:00 +01003705
3706 read_lock(&map_tree->map_tree.lock);
3707 em = lookup_extent_mapping(&map_tree->map_tree, chunk_offset, 1);
3708 read_unlock(&map_tree->map_tree.lock);
3709
Filipe Manana020d5b72015-11-19 10:57:20 +00003710 if (!em) {
3711 /*
3712 * Might have been an unused block group deleted by the cleaner
3713 * kthread or relocation.
3714 */
3715 spin_lock(&cache->lock);
3716 if (!cache->removed)
3717 ret = -EINVAL;
3718 spin_unlock(&cache->lock);
3719
3720 return ret;
3721 }
Arne Jansena2de7332011-03-08 14:14:00 +01003722
Jeff Mahoney95617d62015-06-03 10:55:48 -04003723 map = em->map_lookup;
Arne Jansena2de7332011-03-08 14:14:00 +01003724 if (em->start != chunk_offset)
3725 goto out;
3726
3727 if (em->len < length)
3728 goto out;
3729
3730 for (i = 0; i < map->num_stripes; ++i) {
Stefan Behrensa36cf8b2012-11-02 13:26:57 +01003731 if (map->stripes[i].dev->bdev == scrub_dev->bdev &&
Arne Jansen859acaf2012-02-09 15:09:02 +01003732 map->stripes[i].physical == dev_offset) {
Stefan Behrensa36cf8b2012-11-02 13:26:57 +01003733 ret = scrub_stripe(sctx, map, scrub_dev, i,
Stefan Behrensff023aa2012-11-06 11:43:11 +01003734 chunk_offset, length,
3735 is_dev_replace);
Arne Jansena2de7332011-03-08 14:14:00 +01003736 if (ret)
3737 goto out;
3738 }
3739 }
3740out:
3741 free_extent_map(em);
3742
3743 return ret;
3744}
3745
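/*
 * Walk the DEV_EXTENT items of @scrub_dev in [start, end): for each one,
 * take a reference on the backing block group, set it read-only (in
 * dev-replace mode after flushing delalloc and ordered extents, so the
 * commit roots are complete), scrub the chunk and drop the RO marking
 * again.
 */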
3746static noinline_for_stack
Stefan Behrensa36cf8b2012-11-02 13:26:57 +01003747int scrub_enumerate_chunks(struct scrub_ctx *sctx,
Stefan Behrensff023aa2012-11-06 11:43:11 +01003748 struct btrfs_device *scrub_dev, u64 start, u64 end,
3749 int is_dev_replace)
Arne Jansena2de7332011-03-08 14:14:00 +01003750{
3751 struct btrfs_dev_extent *dev_extent = NULL;
3752 struct btrfs_path *path;
Jeff Mahoney0b246af2016-06-22 18:54:23 -04003753 struct btrfs_fs_info *fs_info = sctx->fs_info;
3754 struct btrfs_root *root = fs_info->dev_root;
Arne Jansena2de7332011-03-08 14:14:00 +01003755 u64 length;
Arne Jansena2de7332011-03-08 14:14:00 +01003756 u64 chunk_offset;
Zhaolei55e3a602015-08-05 16:43:30 +08003757 int ret = 0;
Zhaolei76a8efa2015-11-17 18:46:17 +08003758 int ro_set;
Arne Jansena2de7332011-03-08 14:14:00 +01003759 int slot;
3760 struct extent_buffer *l;
3761 struct btrfs_key key;
3762 struct btrfs_key found_key;
3763 struct btrfs_block_group_cache *cache;
Stefan Behrensff023aa2012-11-06 11:43:11 +01003764 struct btrfs_dev_replace *dev_replace = &fs_info->dev_replace;
Arne Jansena2de7332011-03-08 14:14:00 +01003765
3766 path = btrfs_alloc_path();
3767 if (!path)
3768 return -ENOMEM;
3769
David Sterbae4058b52015-11-27 16:31:35 +01003770 path->reada = READA_FORWARD;
Arne Jansena2de7332011-03-08 14:14:00 +01003771 path->search_commit_root = 1;
3772 path->skip_locking = 1;
3773
Stefan Behrensa36cf8b2012-11-02 13:26:57 +01003774 key.objectid = scrub_dev->devid;
Arne Jansena2de7332011-03-08 14:14:00 +01003775 key.offset = 0ull;
3776 key.type = BTRFS_DEV_EXTENT_KEY;
3777
Arne Jansena2de7332011-03-08 14:14:00 +01003778 while (1) {
3779 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
3780 if (ret < 0)
Arne Jansen8c510322011-06-03 10:09:26 +02003781 break;
3782 if (ret > 0) {
3783 if (path->slots[0] >=
3784 btrfs_header_nritems(path->nodes[0])) {
3785 ret = btrfs_next_leaf(root, path);
Zhaolei55e3a602015-08-05 16:43:30 +08003786 if (ret < 0)
Arne Jansen8c510322011-06-03 10:09:26 +02003787 break;
Zhaolei55e3a602015-08-05 16:43:30 +08003788 if (ret > 0) {
3789 ret = 0;
3790 break;
3791 }
3792 } else {
3793 ret = 0;
Arne Jansen8c510322011-06-03 10:09:26 +02003794 }
3795 }
Arne Jansena2de7332011-03-08 14:14:00 +01003796
3797 l = path->nodes[0];
3798 slot = path->slots[0];
3799
3800 btrfs_item_key_to_cpu(l, &found_key, slot);
3801
Stefan Behrensa36cf8b2012-11-02 13:26:57 +01003802 if (found_key.objectid != scrub_dev->devid)
Arne Jansena2de7332011-03-08 14:14:00 +01003803 break;
3804
David Sterba962a2982014-06-04 18:41:45 +02003805 if (found_key.type != BTRFS_DEV_EXTENT_KEY)
Arne Jansena2de7332011-03-08 14:14:00 +01003806 break;
3807
3808 if (found_key.offset >= end)
3809 break;
3810
3811 if (found_key.offset < key.offset)
3812 break;
3813
3814 dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
3815 length = btrfs_dev_extent_length(l, dev_extent);
3816
Qu Wenruoced96ed2014-06-19 10:42:51 +08003817 if (found_key.offset + length <= start)
3818 goto skip;
Arne Jansena2de7332011-03-08 14:14:00 +01003819
Arne Jansena2de7332011-03-08 14:14:00 +01003820 chunk_offset = btrfs_dev_extent_chunk_offset(l, dev_extent);
3821
3822 /*
3823 * get a reference on the corresponding block group to prevent
3824 * the chunk from going away while we scrub it
3825 */
3826 cache = btrfs_lookup_block_group(fs_info, chunk_offset);
Qu Wenruoced96ed2014-06-19 10:42:51 +08003827
3828		/* some chunks are removed but not yet committed to disk;
3829		 * continue scrubbing */
3830 if (!cache)
3831 goto skip;
3832
Zhaolei55e3a602015-08-05 16:43:30 +08003833 /*
3834		 * we need to call btrfs_inc_block_group_ro() with scrubs_paused,
3835 * to avoid deadlock caused by:
3836 * btrfs_inc_block_group_ro()
3837 * -> btrfs_wait_for_commit()
3838 * -> btrfs_commit_transaction()
3839 * -> btrfs_scrub_pause()
3840 */
3841 scrub_pause_on(fs_info);
Jeff Mahoney5e00f192017-02-15 16:28:29 -05003842 ret = btrfs_inc_block_group_ro(fs_info, cache);
Filipe Mananaf0e9b7d2016-05-14 09:12:53 +01003843 if (!ret && is_dev_replace) {
3844 /*
3845			 * If we are doing a device replace, wait for any tasks
3846			 * that started delalloc right before we set the block
3847 * group to RO mode, as they might have just allocated
3848 * an extent from it or decided they could do a nocow
3849 * write. And if any such tasks did that, wait for their
3850 * ordered extents to complete and then commit the
3851 * current transaction, so that we can later see the new
3852 * extent items in the extent tree - the ordered extents
3853 * create delayed data references (for cow writes) when
3854 * they complete, which will be run and insert the
3855 * corresponding extent items into the extent tree when
3856 * we commit the transaction they used when running
3857 * inode.c:btrfs_finish_ordered_io(). We later use
3858 * the commit root of the extent tree to find extents
3859 * to copy from the srcdev into the tgtdev, and we don't
3860 * want to miss any new extents.
3861 */
3862 btrfs_wait_block_group_reservations(cache);
3863 btrfs_wait_nocow_writers(cache);
3864 ret = btrfs_wait_ordered_roots(fs_info, -1,
3865 cache->key.objectid,
3866 cache->key.offset);
3867 if (ret > 0) {
3868 struct btrfs_trans_handle *trans;
3869
3870 trans = btrfs_join_transaction(root);
3871 if (IS_ERR(trans))
3872 ret = PTR_ERR(trans);
3873 else
Jeff Mahoney3a45bb22016-09-09 21:39:03 -04003874 ret = btrfs_commit_transaction(trans);
Filipe Mananaf0e9b7d2016-05-14 09:12:53 +01003875 if (ret) {
3876 scrub_pause_off(fs_info);
3877 btrfs_put_block_group(cache);
3878 break;
3879 }
3880 }
3881 }
Zhaolei55e3a602015-08-05 16:43:30 +08003882 scrub_pause_off(fs_info);
Zhaolei76a8efa2015-11-17 18:46:17 +08003883
3884 if (ret == 0) {
3885 ro_set = 1;
3886 } else if (ret == -ENOSPC) {
3887 /*
3888			 * btrfs_inc_block_group_ro returns -ENOSPC when it
3889			 * fails to create a new chunk for metadata.
3890			 * This is not a problem for scrub/replace, because
3891			 * metadata is always cowed, and our scrub pauses
3892			 * during transaction commits.
3893 */
3894 ro_set = 0;
3895 } else {
Jeff Mahoney5d163e02016-09-20 10:05:00 -04003896 btrfs_warn(fs_info,
3897 "failed setting block group ro, ret=%d\n",
Zhaolei76a8efa2015-11-17 18:46:17 +08003898 ret);
Zhaolei55e3a602015-08-05 16:43:30 +08003899 btrfs_put_block_group(cache);
3900 break;
3901 }
3902
Filipe Manana81e87a72016-05-14 16:32:35 +01003903 btrfs_dev_replace_lock(&fs_info->dev_replace, 1);
Stefan Behrensff023aa2012-11-06 11:43:11 +01003904 dev_replace->cursor_right = found_key.offset + length;
3905 dev_replace->cursor_left = found_key.offset;
3906 dev_replace->item_needs_writeback = 1;
Filipe Manana81e87a72016-05-14 16:32:35 +01003907 btrfs_dev_replace_unlock(&fs_info->dev_replace, 1);
Zhao Lei8c204c92015-08-19 15:02:40 +08003908 ret = scrub_chunk(sctx, scrub_dev, chunk_offset, length,
Filipe Manana020d5b72015-11-19 10:57:20 +00003909 found_key.offset, cache, is_dev_replace);
Stefan Behrensff023aa2012-11-06 11:43:11 +01003910
3911 /*
3912 * flush, submit all pending read and write bios, afterwards
3913 * wait for them.
3914 * Note that in the dev replace case, a read request causes
3915 * write requests that are submitted in the read completion
3916 * worker. Therefore in the current situation, it is required
3917 * that all write requests are flushed, so that all read and
3918 * write requests are really completed when bios_in_flight
3919 * changes to 0.
3920 */
3921 atomic_set(&sctx->wr_ctx.flush_all_writes, 1);
3922 scrub_submit(sctx);
3923 mutex_lock(&sctx->wr_ctx.wr_lock);
3924 scrub_wr_submit(sctx);
3925 mutex_unlock(&sctx->wr_ctx.wr_lock);
3926
3927 wait_event(sctx->list_wait,
3928 atomic_read(&sctx->bios_in_flight) == 0);
Zhaoleib708ce92015-08-05 16:43:29 +08003929
3930 scrub_pause_on(fs_info);
Wang Shilong12cf9372014-02-19 19:24:17 +08003931
3932 /*
3933 * must be called before we decrease @scrub_paused.
3934 * make sure we don't block transaction commit while
3935 * we are waiting pending workers finished.
3936 */
Stefan Behrensff023aa2012-11-06 11:43:11 +01003937 wait_event(sctx->list_wait,
3938 atomic_read(&sctx->workers_pending) == 0);
Wang Shilong12cf9372014-02-19 19:24:17 +08003939 atomic_set(&sctx->wr_ctx.flush_all_writes, 0);
3940
Zhaoleib708ce92015-08-05 16:43:29 +08003941 scrub_pause_off(fs_info);
Stefan Behrensff023aa2012-11-06 11:43:11 +01003942
Filipe Manana1a1a8b72016-05-14 19:44:40 +01003943 btrfs_dev_replace_lock(&fs_info->dev_replace, 1);
3944 dev_replace->cursor_left = dev_replace->cursor_right;
3945 dev_replace->item_needs_writeback = 1;
3946 btrfs_dev_replace_unlock(&fs_info->dev_replace, 1);
3947
Zhaolei76a8efa2015-11-17 18:46:17 +08003948 if (ro_set)
Jeff Mahoney2ff7e612016-06-22 18:54:24 -04003949 btrfs_dec_block_group_ro(cache);
Stefan Behrensff023aa2012-11-06 11:43:11 +01003950
Filipe Manana758f2df2015-11-19 11:45:48 +00003951 /*
3952 * We might have prevented the cleaner kthread from deleting
3953 * this block group if it was already unused because we raced
3954 * and set it to RO mode first. So add it back to the unused
3955 * list, otherwise it might not ever be deleted unless a manual
3956 * balance is triggered or it becomes used and unused again.
3957 */
3958 spin_lock(&cache->lock);
3959 if (!cache->removed && !cache->ro && cache->reserved == 0 &&
3960 btrfs_block_group_used(&cache->item) == 0) {
3961 spin_unlock(&cache->lock);
3962 spin_lock(&fs_info->unused_bgs_lock);
3963 if (list_empty(&cache->bg_list)) {
3964 btrfs_get_block_group(cache);
3965 list_add_tail(&cache->bg_list,
3966 &fs_info->unused_bgs);
3967 }
3968 spin_unlock(&fs_info->unused_bgs_lock);
3969 } else {
3970 spin_unlock(&cache->lock);
3971 }
3972
Arne Jansena2de7332011-03-08 14:14:00 +01003973 btrfs_put_block_group(cache);
3974 if (ret)
3975 break;
Stefan Behrensaf1be4f2012-11-27 17:39:51 +00003976 if (is_dev_replace &&
3977 atomic64_read(&dev_replace->num_write_errors) > 0) {
Stefan Behrensff023aa2012-11-06 11:43:11 +01003978 ret = -EIO;
3979 break;
3980 }
3981 if (sctx->stat.malloc_errors > 0) {
3982 ret = -ENOMEM;
3983 break;
3984 }
Qu Wenruoced96ed2014-06-19 10:42:51 +08003985skip:
Arne Jansena2de7332011-03-08 14:14:00 +01003986 key.offset = found_key.offset + length;
Chris Mason71267332011-05-23 06:30:52 -04003987 btrfs_release_path(path);
Arne Jansena2de7332011-03-08 14:14:00 +01003988 }
3989
Arne Jansena2de7332011-03-08 14:14:00 +01003990 btrfs_free_path(path);
Arne Jansen8c510322011-06-03 10:09:26 +02003991
Zhaolei55e3a602015-08-05 16:43:30 +08003992 return ret;
Arne Jansena2de7332011-03-08 14:14:00 +01003993}
3994
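/*
 * Scrub all copies of the super block on @scrub_dev. Each copy goes
 * through the regular scrub_pages() path; copies that would lie beyond
 * the committed device size are skipped.
 */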
static noinline_for_stack int scrub_supers(struct scrub_ctx *sctx,
					   struct btrfs_device *scrub_dev)
{
	int i;
	u64 bytenr;
	u64 gen;
	int ret;
	struct btrfs_fs_info *fs_info = sctx->fs_info;

	if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state))
		return -EIO;

	/* Seed devices of a new filesystem have their own generation. */
	if (scrub_dev->fs_devices != fs_info->fs_devices)
		gen = scrub_dev->generation;
	else
		gen = fs_info->last_trans_committed;

	for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) {
		bytenr = btrfs_sb_offset(i);
		if (bytenr + BTRFS_SUPER_INFO_SIZE >
		    scrub_dev->commit_total_bytes)
			break;

		ret = scrub_pages(sctx, bytenr, BTRFS_SUPER_INFO_SIZE, bytenr,
				  scrub_dev, BTRFS_EXTENT_FLAG_SUPER, gen, i,
				  NULL, 1, bytenr);
		if (ret)
			return ret;
	}
	wait_event(sctx->list_wait, atomic_read(&sctx->bios_in_flight) == 0);

	return 0;
}

/*
 * Get a reference count on fs_info->scrub_workers; start the workers if
 * necessary.
 */
static noinline_for_stack int scrub_workers_get(struct btrfs_fs_info *fs_info,
						int is_dev_replace)
{
	unsigned int flags = WQ_FREEZABLE | WQ_UNBOUND;
	int max_active = fs_info->thread_pool_size;

	if (fs_info->scrub_workers_refcnt == 0) {
		if (is_dev_replace)
			fs_info->scrub_workers =
				btrfs_alloc_workqueue(fs_info, "scrub", flags,
						      1, 4);
		else
			fs_info->scrub_workers =
				btrfs_alloc_workqueue(fs_info, "scrub", flags,
						      max_active, 4);
		if (!fs_info->scrub_workers)
			goto fail_scrub_workers;

		fs_info->scrub_wr_completion_workers =
			btrfs_alloc_workqueue(fs_info, "scrubwrc", flags,
					      max_active, 2);
		if (!fs_info->scrub_wr_completion_workers)
			goto fail_scrub_wr_completion_workers;

		fs_info->scrub_nocow_workers =
			btrfs_alloc_workqueue(fs_info, "scrubnc", flags, 1, 0);
		if (!fs_info->scrub_nocow_workers)
			goto fail_scrub_nocow_workers;
		fs_info->scrub_parity_workers =
			btrfs_alloc_workqueue(fs_info, "scrubparity", flags,
					      max_active, 2);
		if (!fs_info->scrub_parity_workers)
			goto fail_scrub_parity_workers;
	}
	++fs_info->scrub_workers_refcnt;
	return 0;

fail_scrub_parity_workers:
	btrfs_destroy_workqueue(fs_info->scrub_nocow_workers);
fail_scrub_nocow_workers:
	btrfs_destroy_workqueue(fs_info->scrub_wr_completion_workers);
fail_scrub_wr_completion_workers:
	btrfs_destroy_workqueue(fs_info->scrub_workers);
fail_scrub_workers:
	return -ENOMEM;
}

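/*
 * Drop a reference on fs_info->scrub_workers; the workqueues are
 * destroyed once the refcount reaches zero.
 */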
static noinline_for_stack void scrub_workers_put(struct btrfs_fs_info *fs_info)
{
	if (--fs_info->scrub_workers_refcnt == 0) {
		btrfs_destroy_workqueue(fs_info->scrub_workers);
		btrfs_destroy_workqueue(fs_info->scrub_wr_completion_workers);
		btrfs_destroy_workqueue(fs_info->scrub_nocow_workers);
		btrfs_destroy_workqueue(fs_info->scrub_parity_workers);
	}
	WARN_ON(fs_info->scrub_workers_refcnt < 0);
}

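/*
 * Entry point for both scrub and device replace: scrub the byte range
 * [start, end] on the device with id @devid and report statistics via
 * @progress. With @is_dev_replace set, the data read is also written to
 * the replace target device.
 */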
int btrfs_scrub_dev(struct btrfs_fs_info *fs_info, u64 devid, u64 start,
		    u64 end, struct btrfs_scrub_progress *progress,
		    int readonly, int is_dev_replace)
{
	struct scrub_ctx *sctx;
	int ret;
	struct btrfs_device *dev;
	struct rcu_string *name;

	if (btrfs_fs_closing(fs_info))
		return -EINVAL;

	if (fs_info->nodesize > BTRFS_STRIPE_LEN) {
		/*
		 * Scrub, as implemented, cannot calculate checksums in this
		 * case. Do not handle this situation at all because it won't
		 * ever happen.
		 */
		btrfs_err(fs_info,
			  "scrub: size assumption nodesize <= BTRFS_STRIPE_LEN (%d <= %d) fails",
			  fs_info->nodesize,
			  BTRFS_STRIPE_LEN);
		return -EINVAL;
	}

	if (fs_info->sectorsize != PAGE_SIZE) {
		/* not supported for data w/o checksums */
		btrfs_err_rl(fs_info,
			     "scrub: size assumption sectorsize != PAGE_SIZE (%d != %lu) fails",
			     fs_info->sectorsize, PAGE_SIZE);
		return -EINVAL;
	}

	if (fs_info->nodesize >
	    PAGE_SIZE * SCRUB_MAX_PAGES_PER_BLOCK ||
	    fs_info->sectorsize > PAGE_SIZE * SCRUB_MAX_PAGES_PER_BLOCK) {
		/*
		 * would exhaust the array bounds of pagev member in
		 * struct scrub_block
		 */
		btrfs_err(fs_info,
			  "scrub: size assumption nodesize and sectorsize <= SCRUB_MAX_PAGES_PER_BLOCK (%d <= %d && %d <= %d) fails",
			  fs_info->nodesize,
			  SCRUB_MAX_PAGES_PER_BLOCK,
			  fs_info->sectorsize,
			  SCRUB_MAX_PAGES_PER_BLOCK);
		return -EINVAL;
	}

	mutex_lock(&fs_info->fs_devices->device_list_mutex);
	dev = btrfs_find_device(fs_info, devid, NULL, NULL);
	if (!dev || (dev->missing && !is_dev_replace)) {
		mutex_unlock(&fs_info->fs_devices->device_list_mutex);
		return -ENODEV;
	}

	if (!is_dev_replace && !readonly && !dev->writeable) {
		mutex_unlock(&fs_info->fs_devices->device_list_mutex);
		rcu_read_lock();
		name = rcu_dereference(dev->name);
		btrfs_err(fs_info, "scrub: device %s is not writable",
			  name->str);
		rcu_read_unlock();
		return -EROFS;
	}

	mutex_lock(&fs_info->scrub_lock);
	if (!dev->in_fs_metadata || dev->is_tgtdev_for_dev_replace) {
		mutex_unlock(&fs_info->scrub_lock);
		mutex_unlock(&fs_info->fs_devices->device_list_mutex);
		return -EIO;
	}

	btrfs_dev_replace_lock(&fs_info->dev_replace, 0);
	if (dev->scrub_device ||
	    (!is_dev_replace &&
	     btrfs_dev_replace_is_ongoing(&fs_info->dev_replace))) {
		btrfs_dev_replace_unlock(&fs_info->dev_replace, 0);
		mutex_unlock(&fs_info->scrub_lock);
		mutex_unlock(&fs_info->fs_devices->device_list_mutex);
		return -EINPROGRESS;
	}
	btrfs_dev_replace_unlock(&fs_info->dev_replace, 0);

	ret = scrub_workers_get(fs_info, is_dev_replace);
	if (ret) {
		mutex_unlock(&fs_info->scrub_lock);
		mutex_unlock(&fs_info->fs_devices->device_list_mutex);
		return ret;
	}

	sctx = scrub_setup_ctx(dev, is_dev_replace);
	if (IS_ERR(sctx)) {
		mutex_unlock(&fs_info->scrub_lock);
		mutex_unlock(&fs_info->fs_devices->device_list_mutex);
		scrub_workers_put(fs_info);
		return PTR_ERR(sctx);
	}
	sctx->readonly = readonly;
	dev->scrub_device = sctx;
	mutex_unlock(&fs_info->fs_devices->device_list_mutex);

	/*
	 * By checking @scrub_pause_req here, we can avoid a race between
	 * committing the transaction and scrubbing.
	 */
	__scrub_blocked_if_needed(fs_info);
	atomic_inc(&fs_info->scrubs_running);
	mutex_unlock(&fs_info->scrub_lock);

	if (!is_dev_replace) {
		/*
		 * Hold the device list mutex so that the super block writes
		 * kicked off by a log tree sync are serialized against us.
		 */
		mutex_lock(&fs_info->fs_devices->device_list_mutex);
		ret = scrub_supers(sctx, dev);
		mutex_unlock(&fs_info->fs_devices->device_list_mutex);
	}

	if (!ret)
		ret = scrub_enumerate_chunks(sctx, dev, start, end,
					     is_dev_replace);

	wait_event(sctx->list_wait, atomic_read(&sctx->bios_in_flight) == 0);
	atomic_dec(&fs_info->scrubs_running);
	wake_up(&fs_info->scrub_pause_wait);

	wait_event(sctx->list_wait, atomic_read(&sctx->workers_pending) == 0);

	if (progress)
		memcpy(progress, &sctx->stat, sizeof(*progress));

	mutex_lock(&fs_info->scrub_lock);
	dev->scrub_device = NULL;
	scrub_workers_put(fs_info);
	mutex_unlock(&fs_info->scrub_lock);

	scrub_put_ctx(sctx);

	return ret;
}

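/*
 * Ask all running scrubs to pause and wait until every one of them has
 * reached its pause point (scrubs_paused == scrubs_running). Used around
 * transaction commit, see the deadlock chain documented in
 * scrub_enumerate_chunks() above.
 */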
void btrfs_scrub_pause(struct btrfs_fs_info *fs_info)
{
	mutex_lock(&fs_info->scrub_lock);
	atomic_inc(&fs_info->scrub_pause_req);
	while (atomic_read(&fs_info->scrubs_paused) !=
	       atomic_read(&fs_info->scrubs_running)) {
		mutex_unlock(&fs_info->scrub_lock);
		wait_event(fs_info->scrub_pause_wait,
			   atomic_read(&fs_info->scrubs_paused) ==
			   atomic_read(&fs_info->scrubs_running));
		mutex_lock(&fs_info->scrub_lock);
	}
	mutex_unlock(&fs_info->scrub_lock);
}

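/* Counterpart of btrfs_scrub_pause(): let paused scrubs resume. */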
void btrfs_scrub_continue(struct btrfs_fs_info *fs_info)
{
	atomic_dec(&fs_info->scrub_pause_req);
	wake_up(&fs_info->scrub_pause_wait);
}

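/*
 * Cancel all scrubs running on this filesystem and wait until they have
 * actually stopped. Returns -ENOTCONN if no scrub was running.
 */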
int btrfs_scrub_cancel(struct btrfs_fs_info *fs_info)
{
	mutex_lock(&fs_info->scrub_lock);
	if (!atomic_read(&fs_info->scrubs_running)) {
		mutex_unlock(&fs_info->scrub_lock);
		return -ENOTCONN;
	}

	atomic_inc(&fs_info->scrub_cancel_req);
	while (atomic_read(&fs_info->scrubs_running)) {
		mutex_unlock(&fs_info->scrub_lock);
		wait_event(fs_info->scrub_pause_wait,
			   atomic_read(&fs_info->scrubs_running) == 0);
		mutex_lock(&fs_info->scrub_lock);
	}
	atomic_dec(&fs_info->scrub_cancel_req);
	mutex_unlock(&fs_info->scrub_lock);

	return 0;
}

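/*
 * Cancel the scrub running on @dev, if any, and wait until it has torn
 * itself down. Returns -ENOTCONN if the device was not being scrubbed.
 */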
int btrfs_scrub_cancel_dev(struct btrfs_fs_info *fs_info,
			   struct btrfs_device *dev)
{
	struct scrub_ctx *sctx;

	mutex_lock(&fs_info->scrub_lock);
	sctx = dev->scrub_device;
	if (!sctx) {
		mutex_unlock(&fs_info->scrub_lock);
		return -ENOTCONN;
	}
	atomic_inc(&sctx->cancel_req);
	while (dev->scrub_device) {
		mutex_unlock(&fs_info->scrub_lock);
		wait_event(fs_info->scrub_pause_wait,
			   dev->scrub_device == NULL);
		mutex_lock(&fs_info->scrub_lock);
	}
	mutex_unlock(&fs_info->scrub_lock);

	return 0;
}

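/*
 * Copy the current scrub statistics of @devid into @progress. Returns
 * -ENODEV if the device does not exist and -ENOTCONN if it exists but is
 * not being scrubbed.
 */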
int btrfs_scrub_progress(struct btrfs_fs_info *fs_info, u64 devid,
			 struct btrfs_scrub_progress *progress)
{
	struct btrfs_device *dev;
	struct scrub_ctx *sctx = NULL;

	mutex_lock(&fs_info->fs_devices->device_list_mutex);
	dev = btrfs_find_device(fs_info, devid, NULL, NULL);
	if (dev)
		sctx = dev->scrub_device;
	if (sctx)
		memcpy(progress, &sctx->stat, sizeof(*progress));
	mutex_unlock(&fs_info->fs_devices->device_list_mutex);

	return dev ? (sctx ? 0 : -ENOTCONN) : -ENODEV;
}

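/*
 * Map @extent_logical to the physical address, device and mirror number
 * of the first stripe backing it. If the mapping fails or does not cover
 * the full length, the output parameters are left untouched.
 */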
static void scrub_remap_extent(struct btrfs_fs_info *fs_info,
			       u64 extent_logical, u64 extent_len,
			       u64 *extent_physical,
			       struct btrfs_device **extent_dev,
			       int *extent_mirror_num)
{
	u64 mapped_length;
	struct btrfs_bio *bbio = NULL;
	int ret;

	mapped_length = extent_len;
	ret = btrfs_map_block(fs_info, BTRFS_MAP_READ, extent_logical,
			      &mapped_length, &bbio, 0);
	if (ret || !bbio || mapped_length < extent_len ||
	    !bbio->stripes[0].dev->bdev) {
		btrfs_put_bbio(bbio);
		return;
	}

	*extent_physical = bbio->stripes[0].physical;
	*extent_mirror_num = bbio->mirror_num;
	*extent_dev = bbio->stripes[0].dev;
	btrfs_put_bbio(bbio);
}

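/*
 * Queue a worker that copies the nocow pages of the extent at @logical
 * to the replace target; the actual work happens in
 * copy_nocow_pages_worker() on the scrub_nocow_workers workqueue.
 */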
static int copy_nocow_pages(struct scrub_ctx *sctx, u64 logical, u64 len,
			    int mirror_num, u64 physical_for_dev_replace)
{
	struct scrub_copy_nocow_ctx *nocow_ctx;
	struct btrfs_fs_info *fs_info = sctx->fs_info;

	nocow_ctx = kzalloc(sizeof(*nocow_ctx), GFP_NOFS);
	if (!nocow_ctx) {
		spin_lock(&sctx->stat_lock);
		sctx->stat.malloc_errors++;
		spin_unlock(&sctx->stat_lock);
		return -ENOMEM;
	}

	scrub_pending_trans_workers_inc(sctx);

	nocow_ctx->sctx = sctx;
	nocow_ctx->logical = logical;
	nocow_ctx->len = len;
	nocow_ctx->mirror_num = mirror_num;
	nocow_ctx->physical_for_dev_replace = physical_for_dev_replace;
	btrfs_init_work(&nocow_ctx->work, btrfs_scrubnc_helper,
			copy_nocow_pages_worker, NULL, NULL);
	INIT_LIST_HEAD(&nocow_ctx->inodes);
	btrfs_queue_work(fs_info->scrub_nocow_workers,
			 &nocow_ctx->work);

	return 0;
}

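/*
 * Callback for iterate_inodes_from_logical(): remember each inode that
 * references the extent so the worker can process them one by one.
 */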
static int record_inode_for_nocow(u64 inum, u64 offset, u64 root, void *ctx)
{
	struct scrub_copy_nocow_ctx *nocow_ctx = ctx;
	struct scrub_nocow_inode *nocow_inode;

	nocow_inode = kzalloc(sizeof(*nocow_inode), GFP_NOFS);
	if (!nocow_inode)
		return -ENOMEM;
	nocow_inode->inum = inum;
	nocow_inode->offset = offset;
	nocow_inode->root = root;
	list_add_tail(&nocow_inode->list, &nocow_ctx->inodes);
	return 0;
}

#define COPY_COMPLETE 1

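/*
 * Worker for the nocow copy: collect all inodes referencing the extent,
 * then copy the pages of each inode in turn until one of them reports
 * COPY_COMPLETE. If the referencing inodes cannot even be determined,
 * the failure is accounted as an uncorrectable read error in the
 * dev-replace statistics.
 */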
static void copy_nocow_pages_worker(struct btrfs_work *work)
{
	struct scrub_copy_nocow_ctx *nocow_ctx =
		container_of(work, struct scrub_copy_nocow_ctx, work);
	struct scrub_ctx *sctx = nocow_ctx->sctx;
	struct btrfs_fs_info *fs_info = sctx->fs_info;
	struct btrfs_root *root = fs_info->extent_root;
	u64 logical = nocow_ctx->logical;
	u64 len = nocow_ctx->len;
	int mirror_num = nocow_ctx->mirror_num;
	u64 physical_for_dev_replace = nocow_ctx->physical_for_dev_replace;
	int ret;
	struct btrfs_trans_handle *trans = NULL;
	struct btrfs_path *path;
	int not_written = 0;

	path = btrfs_alloc_path();
	if (!path) {
		spin_lock(&sctx->stat_lock);
		sctx->stat.malloc_errors++;
		spin_unlock(&sctx->stat_lock);
		not_written = 1;
		goto out;
	}

	trans = btrfs_join_transaction(root);
	if (IS_ERR(trans)) {
		not_written = 1;
		goto out;
	}

	ret = iterate_inodes_from_logical(logical, fs_info, path,
					  record_inode_for_nocow, nocow_ctx);
	if (ret != 0 && ret != -ENOENT) {
		btrfs_warn(fs_info,
			   "iterate_inodes_from_logical() failed: log %llu, phys %llu, len %llu, mir %u, ret %d",
			   logical, physical_for_dev_replace, len, mirror_num,
			   ret);
		not_written = 1;
		goto out;
	}

	btrfs_end_transaction(trans);
	trans = NULL;
	while (!list_empty(&nocow_ctx->inodes)) {
		struct scrub_nocow_inode *entry;

		entry = list_first_entry(&nocow_ctx->inodes,
					 struct scrub_nocow_inode,
					 list);
		list_del_init(&entry->list);
		ret = copy_nocow_pages_for_inode(entry->inum, entry->offset,
						 entry->root, nocow_ctx);
		kfree(entry);
		if (ret == COPY_COMPLETE) {
			ret = 0;
			break;
		} else if (ret) {
			break;
		}
	}
out:
	while (!list_empty(&nocow_ctx->inodes)) {
		struct scrub_nocow_inode *entry;

		entry = list_first_entry(&nocow_ctx->inodes,
					 struct scrub_nocow_inode,
					 list);
		list_del_init(&entry->list);
		kfree(entry);
	}
	if (trans && !IS_ERR(trans))
		btrfs_end_transaction(trans);
	if (not_written)
		btrfs_dev_replace_stats_inc(&fs_info->dev_replace.
					    num_uncorrectable_read_errors);

	btrfs_free_path(path);
	kfree(nocow_ctx);

	scrub_pending_trans_workers_dec(sctx);
}

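/*
 * Check whether the extent at @logical still fully backs the file range
 * [start, start + len) of @inode. Returns 1 if an ordered extent is
 * pending or the mapping has changed (the caller then moves on), 0 if
 * the range is still intact, and a negative errno on failure.
 */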
static int check_extent_to_block(struct btrfs_inode *inode, u64 start, u64 len,
				 u64 logical)
{
	struct extent_state *cached_state = NULL;
	struct btrfs_ordered_extent *ordered;
	struct extent_io_tree *io_tree;
	struct extent_map *em;
	u64 lockstart = start, lockend = start + len - 1;
	int ret = 0;

	io_tree = &inode->io_tree;

	lock_extent_bits(io_tree, lockstart, lockend, &cached_state);
	ordered = btrfs_lookup_ordered_range(inode, lockstart, len);
	if (ordered) {
		btrfs_put_ordered_extent(ordered);
		ret = 1;
		goto out_unlock;
	}

	em = btrfs_get_extent(inode, NULL, 0, start, len, 0);
	if (IS_ERR(em)) {
		ret = PTR_ERR(em);
		goto out_unlock;
	}

	/*
	 * This extent does not actually cover the logical extent anymore,
	 * move on to the next inode.
	 */
	if (em->block_start > logical ||
	    em->block_start + em->block_len < logical + len) {
		free_extent_map(em);
		ret = 1;
		goto out_unlock;
	}
	free_extent_map(em);

out_unlock:
	unlock_extent_cached(io_tree, lockstart, lockend, &cached_state,
			     GFP_NOFS);
	return ret;
}

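/*
 * Copy one inode's pages of the nocow extent to the replace target, page
 * by page: look up the inode in its subvolume, read each page (or take
 * it from the page cache), re-validate the extent with
 * check_extent_to_block() and write the page out via write_page_nocow().
 */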
static int copy_nocow_pages_for_inode(u64 inum, u64 offset, u64 root,
				      struct scrub_copy_nocow_ctx *nocow_ctx)
{
	struct btrfs_fs_info *fs_info = nocow_ctx->sctx->fs_info;
	struct btrfs_key key;
	struct inode *inode;
	struct page *page;
	struct btrfs_root *local_root;
	struct extent_io_tree *io_tree;
	u64 physical_for_dev_replace;
	u64 nocow_ctx_logical;
	u64 len = nocow_ctx->len;
	unsigned long index;
	int srcu_index;
	int ret = 0;
	int err = 0;

	key.objectid = root;
	key.type = BTRFS_ROOT_ITEM_KEY;
	key.offset = (u64)-1;

	srcu_index = srcu_read_lock(&fs_info->subvol_srcu);

	local_root = btrfs_read_fs_root_no_name(fs_info, &key);
	if (IS_ERR(local_root)) {
		srcu_read_unlock(&fs_info->subvol_srcu, srcu_index);
		return PTR_ERR(local_root);
	}

	key.type = BTRFS_INODE_ITEM_KEY;
	key.objectid = inum;
	key.offset = 0;
	inode = btrfs_iget(fs_info->sb, &key, local_root, NULL);
	srcu_read_unlock(&fs_info->subvol_srcu, srcu_index);
	if (IS_ERR(inode))
		return PTR_ERR(inode);

	/* Avoid truncate/dio/punch hole. */
	inode_lock(inode);
	inode_dio_wait(inode);

	physical_for_dev_replace = nocow_ctx->physical_for_dev_replace;
	io_tree = &BTRFS_I(inode)->io_tree;
	nocow_ctx_logical = nocow_ctx->logical;

	ret = check_extent_to_block(BTRFS_I(inode), offset, len,
				    nocow_ctx_logical);
	if (ret) {
		ret = ret > 0 ? 0 : ret;
		goto out;
	}

	while (len >= PAGE_SIZE) {
		index = offset >> PAGE_SHIFT;
again:
		page = find_or_create_page(inode->i_mapping, index, GFP_NOFS);
		if (!page) {
			btrfs_err(fs_info, "find_or_create_page() failed");
			ret = -ENOMEM;
			goto out;
		}

		if (PageUptodate(page)) {
			if (PageDirty(page))
				goto next_page;
		} else {
			ClearPageError(page);
			err = extent_read_full_page(io_tree, page,
						    btrfs_get_extent,
						    nocow_ctx->mirror_num);
			if (err) {
				ret = err;
				goto next_page;
			}

			lock_page(page);
			/*
			 * If the page has been removed from the page cache,
			 * the data on it is meaningless, because it may be
			 * an old one, and the new data may have been written
			 * into a new page in the page cache.
			 */
			if (page->mapping != inode->i_mapping) {
				unlock_page(page);
				put_page(page);
				goto again;
			}
			if (!PageUptodate(page)) {
				ret = -EIO;
				goto next_page;
			}
		}

		ret = check_extent_to_block(BTRFS_I(inode), offset, len,
					    nocow_ctx_logical);
		if (ret) {
			ret = ret > 0 ? 0 : ret;
			goto next_page;
		}

		err = write_page_nocow(nocow_ctx->sctx,
				       physical_for_dev_replace, page);
		if (err)
			ret = err;
next_page:
		unlock_page(page);
		put_page(page);

		if (ret)
			break;

		offset += PAGE_SIZE;
		physical_for_dev_replace += PAGE_SIZE;
		nocow_ctx_logical += PAGE_SIZE;
		len -= PAGE_SIZE;
	}
	ret = COPY_COMPLETE;
out:
	inode_unlock(inode);
	iput(inode);
	return ret;
}

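/*
 * Synchronously write one page to @physical_for_dev_replace on the
 * replace target device. Write failures are added to the device's error
 * statistics and reported as -EIO.
 */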
static int write_page_nocow(struct scrub_ctx *sctx,
			    u64 physical_for_dev_replace, struct page *page)
{
	struct bio *bio;
	struct btrfs_device *dev;
	int ret;

	dev = sctx->wr_ctx.tgtdev;
	if (!dev)
		return -EIO;
	if (!dev->bdev) {
		btrfs_warn_rl(dev->fs_info,
			      "scrub write_page_nocow(bdev == NULL) is unexpected");
		return -EIO;
	}
	bio = btrfs_io_bio_alloc(GFP_NOFS, 1);
	if (!bio) {
		spin_lock(&sctx->stat_lock);
		sctx->stat.malloc_errors++;
		spin_unlock(&sctx->stat_lock);
		return -ENOMEM;
	}
	bio->bi_iter.bi_size = 0;
	bio->bi_iter.bi_sector = physical_for_dev_replace >> 9;
	bio->bi_bdev = dev->bdev;
	bio->bi_opf = REQ_OP_WRITE | REQ_SYNC;
	ret = bio_add_page(bio, page, PAGE_SIZE, 0);
	if (ret != PAGE_SIZE) {
leave_with_eio:
		bio_put(bio);
		btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_WRITE_ERRS);
		return -EIO;
	}

	if (btrfsic_submit_bio_wait(bio))
		goto leave_with_eio;

	bio_put(bio);
	return 0;
}