/*
 * Copyright (C) 2011, 2012 STRATO.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */

#include <linux/blkdev.h>
#include <linux/ratelimit.h>
#include "ctree.h"
#include "volumes.h"
#include "disk-io.h"
#include "ordered-data.h"
#include "transaction.h"
#include "backref.h"
#include "extent_io.h"
#include "dev-replace.h"
#include "check-integrity.h"
#include "rcu-string.h"
#include "raid56.h"

/*
 * This is only the first step towards a full-featured scrub. It reads all
 * extents and super blocks and verifies the checksums. In case a bad checksum
 * is found or the extent cannot be read, good data will be written back if
 * any can be found.
 *
 * Future enhancements:
 *  - In case an unrepairable extent is encountered, track which files are
 *    affected and report them
 *  - track and record media errors, throw out bad devices
 *  - add a mode to also read unallocated space
 */

struct scrub_block;
struct scrub_ctx;

/*
 * The following three values only influence performance.
 * The last one configures the number of parallel and outstanding I/O
 * operations. The first two values configure an upper limit for the number
 * of (dynamically allocated) pages that are added to a bio.
 */
#define SCRUB_PAGES_PER_RD_BIO	32	/* 128k per bio */
#define SCRUB_PAGES_PER_WR_BIO	32	/* 128k per bio */
#define SCRUB_BIOS_PER_SCTX	64	/* 8MB per device in flight */

/*
 * The following value times PAGE_SIZE needs to be large enough to match the
 * largest node/leaf/sector size that shall be supported.
 * Values larger than BTRFS_STRIPE_LEN are not supported.
 */
#define SCRUB_MAX_PAGES_PER_BLOCK	16	/* 64k per node/leaf/sector */

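/*
 * Holds the reference-counted btrfs_bio mapping used while rechecking a
 * block: every scrub_page of the block takes a reference (see
 * scrub_get_recover()/scrub_put_recover()) and the bbio is released on the
 * final put.
 */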
struct scrub_recover {
	atomic_t		refs;
	struct btrfs_bio	*bbio;
	u64			map_length;
};

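/*
 * Describes a single page of a scrub_block: where it lives on disk (logical,
 * physical, and physical_for_dev_replace on the replace target), which
 * device and mirror it was read from, and the expected checksum, if any.
 */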
struct scrub_page {
	struct scrub_block	*sblock;
	struct page		*page;
	struct btrfs_device	*dev;
	struct list_head	list;
	u64			flags;  /* extent flags */
	u64			generation;
	u64			logical;
	u64			physical;
	u64			physical_for_dev_replace;
	atomic_t		refs;
	struct {
		unsigned int	mirror_num:8;
		unsigned int	have_csum:1;
		unsigned int	io_error:1;
	};
	u8			csum[BTRFS_CSUM_SIZE];

	struct scrub_recover	*recover;
};

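/*
 * Ties an in-flight bio to the scrub_pages it carries. The scrub_bios of a
 * scrub_ctx are kept in a free list chained through next_free, and
 * completion is processed in worker context via the embedded btrfs_work.
 */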
struct scrub_bio {
	int			index;
	struct scrub_ctx	*sctx;
	struct btrfs_device	*dev;
	struct bio		*bio;
	int			err;
	u64			logical;
	u64			physical;
#if SCRUB_PAGES_PER_WR_BIO >= SCRUB_PAGES_PER_RD_BIO
	struct scrub_page	*pagev[SCRUB_PAGES_PER_WR_BIO];
#else
	struct scrub_page	*pagev[SCRUB_PAGES_PER_RD_BIO];
#endif
	int			page_count;
	int			next_free;
	struct btrfs_work	work;
};

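/*
 * Groups the pages that make up one checksummed unit (a data sector or a
 * metadata node/leaf, at most SCRUB_MAX_PAGES_PER_BLOCK pages) together
 * with the per-block verification results.
 */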
struct scrub_block {
	struct scrub_page	*pagev[SCRUB_MAX_PAGES_PER_BLOCK];
	int			page_count;
	atomic_t		outstanding_pages;
	atomic_t		refs; /* free mem on transition to zero */
	struct scrub_ctx	*sctx;
	struct scrub_parity	*sparity;
	struct {
		unsigned int	header_error:1;
		unsigned int	checksum_error:1;
		unsigned int	no_io_error_seen:1;
		unsigned int	generation_error:1; /* also sets header_error */

		/*
		 * The following is for the data used to check parity,
		 * i.e. for the data with a checksum.
		 */
		unsigned int	data_corrected:1;
	};
	struct btrfs_work	work;
};

/* Used for the chunks with a parity stripe, such as RAID5/6 */
struct scrub_parity {
	struct scrub_ctx	*sctx;

	struct btrfs_device	*scrub_dev;

	u64			logic_start;

	u64			logic_end;

	int			nsectors;

	int			stripe_len;

	atomic_t		refs;

	struct list_head	spages;

	/* Work of parity check and repair */
	struct btrfs_work	work;

	/* Mark the parity blocks which have data */
	unsigned long		*dbitmap;

	/*
	 * Mark the parity blocks which have data, but for which errors
	 * happened when reading or checking that data
	 */
	unsigned long		*ebitmap;

	unsigned long		bitmap[0];
};

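/*
 * Write-side state for dev-replace: the write bio currently being filled
 * for the target device (tgtdev), protected by wr_lock.
 */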
struct scrub_wr_ctx {
	struct scrub_bio	*wr_curr_bio;
	struct btrfs_device	*tgtdev;
	int			pages_per_wr_bio; /* <= SCRUB_PAGES_PER_WR_BIO */
	atomic_t		flush_all_writes;
	struct mutex		wr_lock;
};

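/*
 * The per-scrub state: the pool of scrub_bios, the in-flight accounting
 * that is waited for on list_wait, the list of checksums for the range
 * currently being scrubbed, and the statistics that are reported back.
 */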
struct scrub_ctx {
	struct scrub_bio	*bios[SCRUB_BIOS_PER_SCTX];
	struct btrfs_root	*dev_root;
	int			first_free;
	int			curr;
	atomic_t		bios_in_flight;
	atomic_t		workers_pending;
	spinlock_t		list_lock;
	wait_queue_head_t	list_wait;
	u16			csum_size;
	struct list_head	csum_list;
	atomic_t		cancel_req;
	int			readonly;
	int			pages_per_rd_bio;
	u32			sectorsize;
	u32			nodesize;

	int			is_dev_replace;
	struct scrub_wr_ctx	wr_ctx;

	/*
	 * statistics
	 */
	struct btrfs_scrub_progress stat;
	spinlock_t		stat_lock;

	/*
	 * Use a ref counter to avoid use-after-free issues. Scrub workers
	 * decrement bios_in_flight and workers_pending and then do a wakeup
	 * on the list_wait wait queue. We must ensure the main scrub task
	 * doesn't free the scrub context before or while the workers are
	 * doing the wakeup() call.
	 */
	atomic_t		refs;
};

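/*
 * Work item that carries everything scrub_fixup_nodatasum() needs to
 * attempt the repair of an unchecksummed (nodatasum) data extent via the
 * regular read path.
 */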
struct scrub_fixup_nodatasum {
	struct scrub_ctx	*sctx;
	struct btrfs_device	*dev;
	u64			logical;
	struct btrfs_root	*root;
	struct btrfs_work	work;
	int			mirror_num;
};

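/* One inode (inum/offset in the given root) that references a NOCOW extent */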
struct scrub_nocow_inode {
	u64			inum;
	u64			offset;
	u64			root;
	struct list_head	list;
};

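/*
 * Work item for copy_nocow_pages_worker(): describes a NOCOW extent
 * (logical, len, mirror_num) to be copied to physical_for_dev_replace on
 * the replace target, plus the list of inodes found to reference it.
 */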
struct scrub_copy_nocow_ctx {
	struct scrub_ctx	*sctx;
	u64			logical;
	u64			len;
	int			mirror_num;
	u64			physical_for_dev_replace;
	struct list_head	inodes;
	struct btrfs_work	work;
};

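/* Parameters handed to the warning printers, see scrub_print_warning() */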
struct scrub_warning {
	struct btrfs_path	*path;
	u64			extent_item_size;
	const char		*errstr;
	sector_t		sector;
	u64			logical;
	struct btrfs_device	*dev;
};

static void scrub_pending_bio_inc(struct scrub_ctx *sctx);
static void scrub_pending_bio_dec(struct scrub_ctx *sctx);
static void scrub_pending_trans_workers_inc(struct scrub_ctx *sctx);
static void scrub_pending_trans_workers_dec(struct scrub_ctx *sctx);
static int scrub_handle_errored_block(struct scrub_block *sblock_to_check);
static int scrub_setup_recheck_block(struct scrub_block *original_sblock,
				     struct scrub_block *sblocks_for_recheck);
static void scrub_recheck_block(struct btrfs_fs_info *fs_info,
				struct scrub_block *sblock,
				int retry_failed_mirror);
static void scrub_recheck_block_checksum(struct scrub_block *sblock);
static int scrub_repair_block_from_good_copy(struct scrub_block *sblock_bad,
					     struct scrub_block *sblock_good);
static int scrub_repair_page_from_good_copy(struct scrub_block *sblock_bad,
					    struct scrub_block *sblock_good,
					    int page_num, int force_write);
static void scrub_write_block_to_dev_replace(struct scrub_block *sblock);
static int scrub_write_page_to_dev_replace(struct scrub_block *sblock,
					   int page_num);
static int scrub_checksum_data(struct scrub_block *sblock);
static int scrub_checksum_tree_block(struct scrub_block *sblock);
static int scrub_checksum_super(struct scrub_block *sblock);
static void scrub_block_get(struct scrub_block *sblock);
static void scrub_block_put(struct scrub_block *sblock);
static void scrub_page_get(struct scrub_page *spage);
static void scrub_page_put(struct scrub_page *spage);
static void scrub_parity_get(struct scrub_parity *sparity);
static void scrub_parity_put(struct scrub_parity *sparity);
static int scrub_add_page_to_rd_bio(struct scrub_ctx *sctx,
				    struct scrub_page *spage);
static int scrub_pages(struct scrub_ctx *sctx, u64 logical, u64 len,
		       u64 physical, struct btrfs_device *dev, u64 flags,
		       u64 gen, int mirror_num, u8 *csum, int force,
		       u64 physical_for_dev_replace);
static void scrub_bio_end_io(struct bio *bio);
static void scrub_bio_end_io_worker(struct btrfs_work *work);
static void scrub_block_complete(struct scrub_block *sblock);
static void scrub_remap_extent(struct btrfs_fs_info *fs_info,
			       u64 extent_logical, u64 extent_len,
			       u64 *extent_physical,
			       struct btrfs_device **extent_dev,
			       int *extent_mirror_num);
static int scrub_setup_wr_ctx(struct scrub_ctx *sctx,
			      struct scrub_wr_ctx *wr_ctx,
			      struct btrfs_fs_info *fs_info,
			      struct btrfs_device *dev,
			      int is_dev_replace);
static void scrub_free_wr_ctx(struct scrub_wr_ctx *wr_ctx);
static int scrub_add_page_to_wr_bio(struct scrub_ctx *sctx,
				    struct scrub_page *spage);
static void scrub_wr_submit(struct scrub_ctx *sctx);
static void scrub_wr_bio_end_io(struct bio *bio);
static void scrub_wr_bio_end_io_worker(struct btrfs_work *work);
static int write_page_nocow(struct scrub_ctx *sctx,
			    u64 physical_for_dev_replace, struct page *page);
static int copy_nocow_pages_for_inode(u64 inum, u64 offset, u64 root,
				      struct scrub_copy_nocow_ctx *ctx);
static int copy_nocow_pages(struct scrub_ctx *sctx, u64 logical, u64 len,
			    int mirror_num, u64 physical_for_dev_replace);
static void copy_nocow_pages_worker(struct btrfs_work *work);
static void __scrub_blocked_if_needed(struct btrfs_fs_info *fs_info);
static void scrub_blocked_if_needed(struct btrfs_fs_info *fs_info);
static void scrub_put_ctx(struct scrub_ctx *sctx);


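/*
 * In-flight bios are counted in bios_in_flight; each one also pins the
 * scrub context with a reference so that the context cannot be freed while
 * a completing bio might still wake up list_wait.
 */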
static void scrub_pending_bio_inc(struct scrub_ctx *sctx)
{
	atomic_inc(&sctx->refs);
	atomic_inc(&sctx->bios_in_flight);
}

static void scrub_pending_bio_dec(struct scrub_ctx *sctx)
{
	atomic_dec(&sctx->bios_in_flight);
	wake_up(&sctx->list_wait);
	scrub_put_ctx(sctx);
}

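/*
 * Called with scrub_lock held; drops and re-takes the lock while waiting
 * until no pause request is pending anymore.
 */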
static void __scrub_blocked_if_needed(struct btrfs_fs_info *fs_info)
{
	while (atomic_read(&fs_info->scrub_pause_req)) {
		mutex_unlock(&fs_info->scrub_lock);
		wait_event(fs_info->scrub_pause_wait,
			   atomic_read(&fs_info->scrub_pause_req) == 0);
		mutex_lock(&fs_info->scrub_lock);
	}
}

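/*
 * scrub_pause_on() marks this scrub as paused; scrub_pause_off() waits for
 * any pending pause request to be dropped before unmarking it. The two are
 * used as a pair, see scrub_blocked_if_needed() below.
 */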
static void scrub_pause_on(struct btrfs_fs_info *fs_info)
{
	atomic_inc(&fs_info->scrubs_paused);
	wake_up(&fs_info->scrub_pause_wait);
}

static void scrub_pause_off(struct btrfs_fs_info *fs_info)
{
	mutex_lock(&fs_info->scrub_lock);
	__scrub_blocked_if_needed(fs_info);
	atomic_dec(&fs_info->scrubs_paused);
	mutex_unlock(&fs_info->scrub_lock);

	wake_up(&fs_info->scrub_pause_wait);
}

static void scrub_blocked_if_needed(struct btrfs_fs_info *fs_info)
{
	scrub_pause_on(fs_info);
	scrub_pause_off(fs_info);
}

/*
 * used for workers that require transaction commits (i.e., for the
 * NOCOW case)
 */
static void scrub_pending_trans_workers_inc(struct scrub_ctx *sctx)
{
	struct btrfs_fs_info *fs_info = sctx->dev_root->fs_info;

	atomic_inc(&sctx->refs);
	/*
	 * increment scrubs_running to prevent cancel requests from
	 * completing as long as a worker is running. we must also
	 * increment scrubs_paused to prevent deadlocking on pause
	 * requests used for transaction commits (as the worker uses a
	 * transaction context). it is safe to regard the worker
	 * as paused for all practical matters. effectively, we only
	 * avoid cancellation requests from completing.
	 */
	mutex_lock(&fs_info->scrub_lock);
	atomic_inc(&fs_info->scrubs_running);
	atomic_inc(&fs_info->scrubs_paused);
	mutex_unlock(&fs_info->scrub_lock);

	/*
	 * The check of the @scrubs_running == @scrubs_paused condition
	 * inside wait_event() is not an atomic operation, which means we
	 * may inc/dec @scrubs_running/@scrubs_paused at any time. Wake up
	 * @scrub_pause_wait as often as we can to keep a blocked
	 * transaction commit waiting as briefly as possible.
	 */
	wake_up(&fs_info->scrub_pause_wait);

	atomic_inc(&sctx->workers_pending);
}

/* used for workers that require transaction commits */
static void scrub_pending_trans_workers_dec(struct scrub_ctx *sctx)
{
	struct btrfs_fs_info *fs_info = sctx->dev_root->fs_info;

	/*
	 * see scrub_pending_trans_workers_inc() why we're pretending
	 * to be paused in the scrub counters
	 */
	mutex_lock(&fs_info->scrub_lock);
	atomic_dec(&fs_info->scrubs_running);
	atomic_dec(&fs_info->scrubs_paused);
	mutex_unlock(&fs_info->scrub_lock);
	atomic_dec(&sctx->workers_pending);
	wake_up(&fs_info->scrub_pause_wait);
	wake_up(&sctx->list_wait);
	scrub_put_ctx(sctx);
}

static void scrub_free_csums(struct scrub_ctx *sctx)
{
	while (!list_empty(&sctx->csum_list)) {
		struct btrfs_ordered_sum *sum;
		sum = list_first_entry(&sctx->csum_list,
				       struct btrfs_ordered_sum, list);
		list_del(&sum->list);
		kfree(sum);
	}
}

static noinline_for_stack void scrub_free_ctx(struct scrub_ctx *sctx)
{
	int i;

	if (!sctx)
		return;

	scrub_free_wr_ctx(&sctx->wr_ctx);

	/* this can happen when scrub is cancelled */
	if (sctx->curr != -1) {
		struct scrub_bio *sbio = sctx->bios[sctx->curr];

		for (i = 0; i < sbio->page_count; i++) {
			WARN_ON(!sbio->pagev[i]->page);
			scrub_block_put(sbio->pagev[i]->sblock);
		}
		bio_put(sbio->bio);
	}

	for (i = 0; i < SCRUB_BIOS_PER_SCTX; ++i) {
		struct scrub_bio *sbio = sctx->bios[i];

		if (!sbio)
			break;
		kfree(sbio);
	}

	scrub_free_csums(sctx);
	kfree(sctx);
}

static void scrub_put_ctx(struct scrub_ctx *sctx)
{
	if (atomic_dec_and_test(&sctx->refs))
		scrub_free_ctx(sctx);
}

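/*
 * Allocate a scrub context for @dev with SCRUB_BIOS_PER_SCTX pre-allocated
 * scrub_bios chained into a free list, and set up the dev-replace write
 * context if requested.
 */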
static noinline_for_stack
struct scrub_ctx *scrub_setup_ctx(struct btrfs_device *dev, int is_dev_replace)
{
	struct scrub_ctx *sctx;
	int i;
	struct btrfs_fs_info *fs_info = dev->dev_root->fs_info;
	int ret;

	sctx = kzalloc(sizeof(*sctx), GFP_KERNEL);
	if (!sctx)
		goto nomem;
	atomic_set(&sctx->refs, 1);
	sctx->is_dev_replace = is_dev_replace;
	sctx->pages_per_rd_bio = SCRUB_PAGES_PER_RD_BIO;
	sctx->curr = -1;
	sctx->dev_root = dev->dev_root;
	for (i = 0; i < SCRUB_BIOS_PER_SCTX; ++i) {
		struct scrub_bio *sbio;

		sbio = kzalloc(sizeof(*sbio), GFP_KERNEL);
		if (!sbio)
			goto nomem;
		sctx->bios[i] = sbio;

		sbio->index = i;
		sbio->sctx = sctx;
		sbio->page_count = 0;
		btrfs_init_work(&sbio->work, btrfs_scrub_helper,
				scrub_bio_end_io_worker, NULL, NULL);

		if (i != SCRUB_BIOS_PER_SCTX - 1)
			sctx->bios[i]->next_free = i + 1;
		else
			sctx->bios[i]->next_free = -1;
	}
	sctx->first_free = 0;
	sctx->nodesize = dev->dev_root->nodesize;
	sctx->sectorsize = dev->dev_root->sectorsize;
	atomic_set(&sctx->bios_in_flight, 0);
	atomic_set(&sctx->workers_pending, 0);
	atomic_set(&sctx->cancel_req, 0);
	sctx->csum_size = btrfs_super_csum_size(fs_info->super_copy);
	INIT_LIST_HEAD(&sctx->csum_list);

	spin_lock_init(&sctx->list_lock);
	spin_lock_init(&sctx->stat_lock);
	init_waitqueue_head(&sctx->list_wait);

	ret = scrub_setup_wr_ctx(sctx, &sctx->wr_ctx, fs_info,
				 fs_info->dev_replace.tgtdev, is_dev_replace);
	if (ret) {
		scrub_free_ctx(sctx);
		return ERR_PTR(ret);
	}
	return sctx;

nomem:
	scrub_free_ctx(sctx);
	return ERR_PTR(-ENOMEM);
}

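/*
 * Callback for iterate_extent_inodes(): resolves the paths of one inode
 * that references the errored extent and prints a warning line for each
 * path.
 */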
static int scrub_print_warning_inode(u64 inum, u64 offset, u64 root,
				     void *warn_ctx)
{
	u64 isize;
	u32 nlink;
	int ret;
	int i;
	struct extent_buffer *eb;
	struct btrfs_inode_item *inode_item;
	struct scrub_warning *swarn = warn_ctx;
	struct btrfs_fs_info *fs_info = swarn->dev->dev_root->fs_info;
	struct inode_fs_paths *ipath = NULL;
	struct btrfs_root *local_root;
	struct btrfs_key root_key;
	struct btrfs_key key;

	root_key.objectid = root;
	root_key.type = BTRFS_ROOT_ITEM_KEY;
	root_key.offset = (u64)-1;
	local_root = btrfs_read_fs_root_no_name(fs_info, &root_key);
	if (IS_ERR(local_root)) {
		ret = PTR_ERR(local_root);
		goto err;
	}

	/*
	 * this makes the path point to (inum INODE_ITEM ioff)
	 */
	key.objectid = inum;
	key.type = BTRFS_INODE_ITEM_KEY;
	key.offset = 0;

	ret = btrfs_search_slot(NULL, local_root, &key, swarn->path, 0, 0);
	if (ret) {
		btrfs_release_path(swarn->path);
		goto err;
	}

	eb = swarn->path->nodes[0];
	inode_item = btrfs_item_ptr(eb, swarn->path->slots[0],
				    struct btrfs_inode_item);
	isize = btrfs_inode_size(eb, inode_item);
	nlink = btrfs_inode_nlink(eb, inode_item);
	btrfs_release_path(swarn->path);

	ipath = init_ipath(4096, local_root, swarn->path);
	if (IS_ERR(ipath)) {
		ret = PTR_ERR(ipath);
		ipath = NULL;
		goto err;
	}
	ret = paths_from_inode(inum, ipath);

	if (ret < 0)
		goto err;

	/*
	 * we deliberately ignore the fact that ipath might have been too
	 * small to hold all of the paths here
	 */
	for (i = 0; i < ipath->fspath->elem_cnt; ++i)
		btrfs_warn_in_rcu(fs_info, "%s at logical %llu on dev "
			"%s, sector %llu, root %llu, inode %llu, offset %llu, "
			"length %llu, links %u (path: %s)", swarn->errstr,
			swarn->logical, rcu_str_deref(swarn->dev->name),
			(unsigned long long)swarn->sector, root, inum, offset,
			min(isize - offset, (u64)PAGE_SIZE), nlink,
			(char *)(unsigned long)ipath->fspath->val[i]);

	free_ipath(ipath);
	return 0;

err:
	btrfs_warn_in_rcu(fs_info, "%s at logical %llu on dev "
		"%s, sector %llu, root %llu, inode %llu, offset %llu: path "
		"resolving failed with ret=%d", swarn->errstr,
		swarn->logical, rcu_str_deref(swarn->dev->name),
		(unsigned long long)swarn->sector, root, inum, offset, ret);

	free_ipath(ipath);
	return 0;
}

static void scrub_print_warning(const char *errstr, struct scrub_block *sblock)
{
	struct btrfs_device *dev;
	struct btrfs_fs_info *fs_info;
	struct btrfs_path *path;
	struct btrfs_key found_key;
	struct extent_buffer *eb;
	struct btrfs_extent_item *ei;
	struct scrub_warning swarn;
	unsigned long ptr = 0;
	u64 extent_item_pos;
	u64 flags = 0;
	u64 ref_root;
	u32 item_size;
	u8 ref_level = 0;
	int ret;

	WARN_ON(sblock->page_count < 1);
	dev = sblock->pagev[0]->dev;
	fs_info = sblock->sctx->dev_root->fs_info;

	path = btrfs_alloc_path();
	if (!path)
		return;

	swarn.sector = (sblock->pagev[0]->physical) >> 9;
	swarn.logical = sblock->pagev[0]->logical;
	swarn.errstr = errstr;
	swarn.dev = NULL;

	ret = extent_from_logical(fs_info, swarn.logical, path, &found_key,
				  &flags);
	if (ret < 0)
		goto out;

	extent_item_pos = swarn.logical - found_key.objectid;
	swarn.extent_item_size = found_key.offset;

	eb = path->nodes[0];
	ei = btrfs_item_ptr(eb, path->slots[0], struct btrfs_extent_item);
	item_size = btrfs_item_size_nr(eb, path->slots[0]);

	if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
		do {
			ret = tree_backref_for_extent(&ptr, eb, &found_key, ei,
						      item_size, &ref_root,
						      &ref_level);
			btrfs_warn_in_rcu(fs_info,
				"%s at logical %llu on dev %s, "
				"sector %llu: metadata %s (level %d) in tree "
				"%llu", errstr, swarn.logical,
				rcu_str_deref(dev->name),
				(unsigned long long)swarn.sector,
				ref_level ? "node" : "leaf",
				ret < 0 ? -1 : ref_level,
				ret < 0 ? -1 : ref_root);
		} while (ret != 1);
		btrfs_release_path(path);
	} else {
		btrfs_release_path(path);
		swarn.path = path;
		swarn.dev = dev;
		iterate_extent_inodes(fs_info, found_key.objectid,
				      extent_item_pos, 1,
				      scrub_print_warning_inode, &swarn);
	}

out:
	btrfs_free_path(path);
}

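/*
 * Callback for iterate_inodes_from_logical(): re-reads one page of the bad
 * copy through the page cache so the generic read-repair code can rewrite
 * the bad sector; returns 1 to stop the iteration once the error was
 * corrected.
 */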
static int scrub_fixup_readpage(u64 inum, u64 offset, u64 root, void *fixup_ctx)
{
	struct page *page = NULL;
	unsigned long index;
	struct scrub_fixup_nodatasum *fixup = fixup_ctx;
	int ret;
	int corrected = 0;
	struct btrfs_key key;
	struct inode *inode = NULL;
	struct btrfs_fs_info *fs_info;
	u64 end = offset + PAGE_SIZE - 1;
	struct btrfs_root *local_root;
	int srcu_index;

	key.objectid = root;
	key.type = BTRFS_ROOT_ITEM_KEY;
	key.offset = (u64)-1;

	fs_info = fixup->root->fs_info;
	srcu_index = srcu_read_lock(&fs_info->subvol_srcu);

	local_root = btrfs_read_fs_root_no_name(fs_info, &key);
	if (IS_ERR(local_root)) {
		srcu_read_unlock(&fs_info->subvol_srcu, srcu_index);
		return PTR_ERR(local_root);
	}

	key.type = BTRFS_INODE_ITEM_KEY;
	key.objectid = inum;
	key.offset = 0;
	inode = btrfs_iget(fs_info->sb, &key, local_root, NULL);
	srcu_read_unlock(&fs_info->subvol_srcu, srcu_index);
	if (IS_ERR(inode))
		return PTR_ERR(inode);

	index = offset >> PAGE_SHIFT;

	page = find_or_create_page(inode->i_mapping, index, GFP_NOFS);
	if (!page) {
		ret = -ENOMEM;
		goto out;
	}

	if (PageUptodate(page)) {
		if (PageDirty(page)) {
			/*
			 * we need to write the data to the defect sector. the
			 * data that was in that sector is not in memory,
			 * because the page was modified. we must not write the
			 * modified page to that sector.
			 *
			 * TODO: what could be done here: wait for the delalloc
			 *       runner to write out that page (might involve
			 *       COW) and see whether the sector is still
			 *       referenced afterwards.
			 *
			 * For the meantime, we'll treat this error as
			 * uncorrectable, although there is a chance that a
			 * later scrub will find the bad sector again and that
			 * there's no dirty page in memory then.
			 */
			ret = -EIO;
			goto out;
		}
		ret = repair_io_failure(inode, offset, PAGE_SIZE,
					fixup->logical, page,
					offset - page_offset(page),
					fixup->mirror_num);
		unlock_page(page);
		corrected = !ret;
	} else {
		/*
		 * we need to get good data first. the general readpage path
		 * will call repair_io_failure for us, we just have to make
		 * sure we read the bad mirror.
		 */
		ret = set_extent_bits(&BTRFS_I(inode)->io_tree, offset, end,
				      EXTENT_DAMAGED);
		if (ret) {
			/* set_extent_bits should give proper error */
			WARN_ON(ret > 0);
			if (ret > 0)
				ret = -EFAULT;
			goto out;
		}

		ret = extent_read_full_page(&BTRFS_I(inode)->io_tree, page,
					    btrfs_get_extent,
					    fixup->mirror_num);
		wait_on_page_locked(page);

		corrected = !test_range_bit(&BTRFS_I(inode)->io_tree, offset,
					    end, EXTENT_DAMAGED, 0, NULL);
		if (!corrected)
			clear_extent_bits(&BTRFS_I(inode)->io_tree, offset, end,
					  EXTENT_DAMAGED);
	}

out:
	if (page)
		put_page(page);

	iput(inode);

	if (ret < 0)
		return ret;

	if (ret == 0 && corrected) {
		/*
		 * we only need to call readpage for one of the inodes
		 * belonging to this extent. so make iterate_extent_inodes
		 * stop
		 */
		return 1;
	}

	return -EIO;
}

static void scrub_fixup_nodatasum(struct btrfs_work *work)
{
	int ret;
	struct scrub_fixup_nodatasum *fixup;
	struct scrub_ctx *sctx;
	struct btrfs_trans_handle *trans = NULL;
	struct btrfs_path *path;
	int uncorrectable = 0;

	fixup = container_of(work, struct scrub_fixup_nodatasum, work);
	sctx = fixup->sctx;

	path = btrfs_alloc_path();
	if (!path) {
		spin_lock(&sctx->stat_lock);
		++sctx->stat.malloc_errors;
		spin_unlock(&sctx->stat_lock);
		uncorrectable = 1;
		goto out;
	}

	trans = btrfs_join_transaction(fixup->root);
	if (IS_ERR(trans)) {
		uncorrectable = 1;
		goto out;
	}

	/*
	 * the idea is to trigger a regular read through the standard path. we
	 * read a page from the (failed) logical address by specifying the
	 * corresponding copynum of the failed sector. thus, that readpage is
	 * expected to fail.
	 * that is the point where on-the-fly error correction will kick in
	 * (once it's finished) and rewrite the failed sector if a good copy
	 * can be found.
	 */
	ret = iterate_inodes_from_logical(fixup->logical, fixup->root->fs_info,
					  path, scrub_fixup_readpage,
					  fixup);
	if (ret < 0) {
		uncorrectable = 1;
		goto out;
	}
	WARN_ON(ret != 1);

	spin_lock(&sctx->stat_lock);
	++sctx->stat.corrected_errors;
	spin_unlock(&sctx->stat_lock);

out:
	if (trans && !IS_ERR(trans))
		btrfs_end_transaction(trans, fixup->root);
	if (uncorrectable) {
		spin_lock(&sctx->stat_lock);
		++sctx->stat.uncorrectable_errors;
		spin_unlock(&sctx->stat_lock);
		btrfs_dev_replace_stats_inc(
			&sctx->dev_root->fs_info->dev_replace.
			num_uncorrectable_read_errors);
		btrfs_err_rl_in_rcu(sctx->dev_root->fs_info,
		    "unable to fixup (nodatasum) error at logical %llu on dev %s",
			fixup->logical, rcu_str_deref(fixup->dev->name));
	}

	btrfs_free_path(path);
	kfree(fixup);

	scrub_pending_trans_workers_dec(sctx);
}

static inline void scrub_get_recover(struct scrub_recover *recover)
{
	atomic_inc(&recover->refs);
}

static inline void scrub_put_recover(struct scrub_recover *recover)
{
	if (atomic_dec_and_test(&recover->refs)) {
		btrfs_put_bbio(recover->bbio);
		kfree(recover);
	}
}

/*
 * scrub_handle_errored_block gets called when either verification of the
 * pages failed or the bio failed to read, e.g. with EIO. In the latter
 * case, this function handles all pages in the bio, even though only one
 * may be bad.
 * The goal of this function is to repair the errored block by using the
 * contents of one of the mirrors.
 */
static int scrub_handle_errored_block(struct scrub_block *sblock_to_check)
{
	struct scrub_ctx *sctx = sblock_to_check->sctx;
	struct btrfs_device *dev;
	struct btrfs_fs_info *fs_info;
	u64 length;
	u64 logical;
	unsigned int failed_mirror_index;
	unsigned int is_metadata;
	unsigned int have_csum;
	struct scrub_block *sblocks_for_recheck; /* holds one for each mirror */
	struct scrub_block *sblock_bad;
	int ret;
	int mirror_index;
	int page_num;
	int success;
	static DEFINE_RATELIMIT_STATE(_rs, DEFAULT_RATELIMIT_INTERVAL,
				      DEFAULT_RATELIMIT_BURST);

	BUG_ON(sblock_to_check->page_count < 1);
	fs_info = sctx->dev_root->fs_info;
	if (sblock_to_check->pagev[0]->flags & BTRFS_EXTENT_FLAG_SUPER) {
		/*
		 * if we find an error in a super block, we just report it.
		 * They will get written with the next transaction commit
		 * anyway
		 */
		spin_lock(&sctx->stat_lock);
		++sctx->stat.super_errors;
		spin_unlock(&sctx->stat_lock);
		return 0;
	}
	length = sblock_to_check->page_count * PAGE_SIZE;
	logical = sblock_to_check->pagev[0]->logical;
	BUG_ON(sblock_to_check->pagev[0]->mirror_num < 1);
	failed_mirror_index = sblock_to_check->pagev[0]->mirror_num - 1;
	is_metadata = !(sblock_to_check->pagev[0]->flags &
			BTRFS_EXTENT_FLAG_DATA);
	have_csum = sblock_to_check->pagev[0]->have_csum;
	dev = sblock_to_check->pagev[0]->dev;

	if (sctx->is_dev_replace && !is_metadata && !have_csum) {
		sblocks_for_recheck = NULL;
		goto nodatasum_case;
	}

	/*
	 * read all mirrors one after the other. This includes re-reading
	 * the extent or metadata block that failed (that failure was the
	 * reason this fixup code is called), this time page by page, in
	 * order to know which pages caused I/O errors and which ones are
	 * good (for all mirrors).
	 * It is the goal to handle the situation when more than one
	 * mirror contains I/O errors, but the errors do not
	 * overlap, i.e. the data can be repaired by selecting the
	 * pages from those mirrors without I/O error on the
	 * particular pages. One example (with blocks >= 2 * PAGE_SIZE)
	 * would be that mirror #1 has an I/O error on the first page,
	 * the second page is good, and mirror #2 has an I/O error on
	 * the second page, but the first page is good.
	 * Then the first page of the first mirror can be repaired by
	 * taking the first page of the second mirror, and the
	 * second page of the second mirror can be repaired by
	 * copying the contents of the 2nd page of the 1st mirror.
	 * One more note: if the pages of one mirror contain I/O
	 * errors, the checksum cannot be verified. In order to get
	 * the best data for repairing, the first attempt is to find
	 * a mirror without I/O errors and with a validated checksum.
	 * Only if this is not possible are the pages picked from
	 * mirrors with I/O errors, without considering the checksum.
	 * If the latter is the case, at the end the checksum of the
	 * repaired area is verified in order to correctly maintain
	 * the statistics.
	 */
	sblocks_for_recheck = kcalloc(BTRFS_MAX_MIRRORS,
				      sizeof(*sblocks_for_recheck), GFP_NOFS);
	if (!sblocks_for_recheck) {
		spin_lock(&sctx->stat_lock);
		sctx->stat.malloc_errors++;
		sctx->stat.read_errors++;
		sctx->stat.uncorrectable_errors++;
		spin_unlock(&sctx->stat_lock);
		btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_READ_ERRS);
		goto out;
	}

	/* setup the context, map the logical blocks and alloc the pages */
	ret = scrub_setup_recheck_block(sblock_to_check, sblocks_for_recheck);
	if (ret) {
		spin_lock(&sctx->stat_lock);
		sctx->stat.read_errors++;
		sctx->stat.uncorrectable_errors++;
		spin_unlock(&sctx->stat_lock);
		btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_READ_ERRS);
		goto out;
	}
	BUG_ON(failed_mirror_index >= BTRFS_MAX_MIRRORS);
	sblock_bad = sblocks_for_recheck + failed_mirror_index;

	/* build and submit the bios for the failed mirror, check checksums */
	scrub_recheck_block(fs_info, sblock_bad, 1);

	if (!sblock_bad->header_error && !sblock_bad->checksum_error &&
	    sblock_bad->no_io_error_seen) {
		/*
		 * the error disappeared after reading page by page, or
		 * the area was part of a huge bio and other parts of the
		 * bio caused I/O errors, or the block layer merged several
		 * read requests into one and the error is caused by a
		 * different bio (usually one of the two latter cases is
		 * the cause)
		 */
		spin_lock(&sctx->stat_lock);
		sctx->stat.unverified_errors++;
		sblock_to_check->data_corrected = 1;
		spin_unlock(&sctx->stat_lock);

		if (sctx->is_dev_replace)
			scrub_write_block_to_dev_replace(sblock_bad);
		goto out;
	}

	if (!sblock_bad->no_io_error_seen) {
		spin_lock(&sctx->stat_lock);
		sctx->stat.read_errors++;
		spin_unlock(&sctx->stat_lock);
		if (__ratelimit(&_rs))
			scrub_print_warning("i/o error", sblock_to_check);
		btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_READ_ERRS);
	} else if (sblock_bad->checksum_error) {
		spin_lock(&sctx->stat_lock);
		sctx->stat.csum_errors++;
		spin_unlock(&sctx->stat_lock);
		if (__ratelimit(&_rs))
			scrub_print_warning("checksum error", sblock_to_check);
		btrfs_dev_stat_inc_and_print(dev,
					     BTRFS_DEV_STAT_CORRUPTION_ERRS);
	} else if (sblock_bad->header_error) {
		spin_lock(&sctx->stat_lock);
		sctx->stat.verify_errors++;
		spin_unlock(&sctx->stat_lock);
		if (__ratelimit(&_rs))
			scrub_print_warning("checksum/header error",
					    sblock_to_check);
		if (sblock_bad->generation_error)
			btrfs_dev_stat_inc_and_print(dev,
				BTRFS_DEV_STAT_GENERATION_ERRS);
		else
			btrfs_dev_stat_inc_and_print(dev,
				BTRFS_DEV_STAT_CORRUPTION_ERRS);
	}

	if (sctx->readonly) {
		ASSERT(!sctx->is_dev_replace);
		goto out;
	}

	if (!is_metadata && !have_csum) {
		struct scrub_fixup_nodatasum *fixup_nodatasum;

		WARN_ON(sctx->is_dev_replace);

nodatasum_case:

		/*
		 * !is_metadata and !have_csum, this means that the data
		 * might not be COWed, that it might be modified
		 * concurrently. The general strategy to work on the
		 * commit root does not help in the case when COW is not
		 * used.
		 */
		fixup_nodatasum = kzalloc(sizeof(*fixup_nodatasum), GFP_NOFS);
		if (!fixup_nodatasum)
			goto did_not_correct_error;
		fixup_nodatasum->sctx = sctx;
		fixup_nodatasum->dev = dev;
		fixup_nodatasum->logical = logical;
		fixup_nodatasum->root = fs_info->extent_root;
		fixup_nodatasum->mirror_num = failed_mirror_index + 1;
		scrub_pending_trans_workers_inc(sctx);
		btrfs_init_work(&fixup_nodatasum->work, btrfs_scrub_helper,
				scrub_fixup_nodatasum, NULL, NULL);
		btrfs_queue_work(fs_info->scrub_workers,
				 &fixup_nodatasum->work);
		goto out;
	}

	/*
	 * now build and submit the bios for the other mirrors, check
	 * checksums.
	 * First try to pick the mirror which is completely without I/O
	 * errors and also does not have a checksum error.
	 * If one is found, and if a checksum is present, the full block
	 * that is known to contain an error is rewritten. Afterwards
	 * the block is known to be corrected.
	 * If a mirror is found which is completely correct, and no
	 * checksum is present, only those pages are rewritten that had
	 * an I/O error in the block to be repaired, since it cannot be
	 * determined which copy of the other pages is better (and it
	 * could happen otherwise that a correct page would be
	 * overwritten by a bad one).
	 */
	for (mirror_index = 0;
	     mirror_index < BTRFS_MAX_MIRRORS &&
	     sblocks_for_recheck[mirror_index].page_count > 0;
	     mirror_index++) {
		struct scrub_block *sblock_other;

		if (mirror_index == failed_mirror_index)
			continue;
		sblock_other = sblocks_for_recheck + mirror_index;

		/* build and submit the bios, check checksums */
		scrub_recheck_block(fs_info, sblock_other, 0);

		if (!sblock_other->header_error &&
		    !sblock_other->checksum_error &&
		    sblock_other->no_io_error_seen) {
			if (sctx->is_dev_replace) {
				scrub_write_block_to_dev_replace(sblock_other);
				goto corrected_error;
			} else {
				ret = scrub_repair_block_from_good_copy(
						sblock_bad, sblock_other);
				if (!ret)
					goto corrected_error;
			}
		}
	}

	if (sblock_bad->no_io_error_seen && !sctx->is_dev_replace)
		goto did_not_correct_error;

	/*
	 * In case of I/O errors in the area that is supposed to be
	 * repaired, continue by picking good copies of those pages.
	 * Select the good pages from mirrors to rewrite bad pages from
	 * the area to fix. Afterwards verify the checksum of the block
	 * that is supposed to be repaired. This verification step is
	 * only done for the purpose of statistic counting and for the
	 * final scrub report, whether errors remain.
	 * A perfect algorithm could make use of the checksum and try
	 * all possible combinations of pages from the different mirrors
	 * until the checksum verification succeeds. For example, when
	 * the 2nd page of mirror #1 faces I/O errors, and the 2nd page
	 * of mirror #2 is readable but the final checksum test fails,
	 * then the 2nd page of mirror #3 could be tried, to see whether
	 * the final checksum then succeeds. But this would be a rare
	 * exception and is therefore not implemented. At least it is
	 * avoided that the good copy is overwritten.
	 * A more useful improvement would be to pick the sectors
	 * without I/O error based on sector sizes (512 bytes on legacy
	 * disks) instead of on PAGE_SIZE. Then maybe 512 byte of one
	 * mirror could be repaired by taking 512 byte of a different
	 * mirror, even if other 512 byte sectors in the same PAGE_SIZE
	 * area are unreadable.
	 */
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001138 success = 1;
Zhao Leib968fed2015-01-20 15:11:41 +08001139 for (page_num = 0; page_num < sblock_bad->page_count;
1140 page_num++) {
Stefan Behrens7a9e9982012-11-02 14:58:04 +01001141 struct scrub_page *page_bad = sblock_bad->pagev[page_num];
Zhao Leib968fed2015-01-20 15:11:41 +08001142 struct scrub_block *sblock_other = NULL;
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001143
Zhao Leib968fed2015-01-20 15:11:41 +08001144 /* skip no-io-error page in scrub */
1145 if (!page_bad->io_error && !sctx->is_dev_replace)
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001146 continue;
1147
Zhao Leib968fed2015-01-20 15:11:41 +08001148 /* try to find no-io-error page in mirrors */
1149 if (page_bad->io_error) {
1150 for (mirror_index = 0;
1151 mirror_index < BTRFS_MAX_MIRRORS &&
1152 sblocks_for_recheck[mirror_index].page_count > 0;
1153 mirror_index++) {
1154 if (!sblocks_for_recheck[mirror_index].
1155 pagev[page_num]->io_error) {
1156 sblock_other = sblocks_for_recheck +
1157 mirror_index;
1158 break;
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001159 }
Jan Schmidt13db62b2011-06-13 19:56:13 +02001160 }
Zhao Leib968fed2015-01-20 15:11:41 +08001161 if (!sblock_other)
1162 success = 0;
Jan Schmidt13db62b2011-06-13 19:56:13 +02001163 }
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001164
Zhao Leib968fed2015-01-20 15:11:41 +08001165 if (sctx->is_dev_replace) {
1166 /*
1167 * did not find a mirror to fetch the page
1168 * from. scrub_write_page_to_dev_replace()
1169 * handles this case (page->io_error), by
1170 * filling the block with zeros before
1171 * submitting the write request
1172 */
1173 if (!sblock_other)
1174 sblock_other = sblock_bad;
1175
1176 if (scrub_write_page_to_dev_replace(sblock_other,
1177 page_num) != 0) {
1178 btrfs_dev_replace_stats_inc(
1179 &sctx->dev_root->
1180 fs_info->dev_replace.
1181 num_write_errors);
1182 success = 0;
1183 }
1184 } else if (sblock_other) {
1185 ret = scrub_repair_page_from_good_copy(sblock_bad,
1186 sblock_other,
1187 page_num, 0);
1188			if (!ret)
1189 page_bad->io_error = 0;
1190 else
1191 success = 0;
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001192 }
1193 }
1194
Zhao Leib968fed2015-01-20 15:11:41 +08001195 if (success && !sctx->is_dev_replace) {
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001196 if (is_metadata || have_csum) {
1197 /*
1198			 * We need to verify the checksum now that all
1199 * sectors on disk are repaired (the write
1200 * request for data to be repaired is on its way).
1201 * Just be lazy and use scrub_recheck_block()
1202 * which re-reads the data before the checksum
1203 * is verified, but most likely the data comes out
1204 * of the page cache.
1205 */
Zhao Leiaffe4a52015-08-24 21:32:06 +08001206 scrub_recheck_block(fs_info, sblock_bad, 1);
Stefan Behrens34f5c8e2012-11-02 16:16:26 +01001207 if (!sblock_bad->header_error &&
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001208 !sblock_bad->checksum_error &&
1209 sblock_bad->no_io_error_seen)
1210 goto corrected_error;
1211 else
1212 goto did_not_correct_error;
1213 } else {
1214corrected_error:
Stefan Behrensd9d181c2012-11-02 09:58:09 +01001215 spin_lock(&sctx->stat_lock);
1216 sctx->stat.corrected_errors++;
Miao Xie5a6ac9e2014-11-06 17:20:58 +08001217 sblock_to_check->data_corrected = 1;
Stefan Behrensd9d181c2012-11-02 09:58:09 +01001218 spin_unlock(&sctx->stat_lock);
David Sterbab14af3b2015-10-08 10:43:10 +02001219 btrfs_err_rl_in_rcu(fs_info,
1220 "fixed up error at logical %llu on dev %s",
Geert Uytterhoevenc1c9ff72013-08-20 13:20:07 +02001221 logical, rcu_str_deref(dev->name));
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001222 }
1223 } else {
1224did_not_correct_error:
Stefan Behrensd9d181c2012-11-02 09:58:09 +01001225 spin_lock(&sctx->stat_lock);
1226 sctx->stat.uncorrectable_errors++;
1227 spin_unlock(&sctx->stat_lock);
David Sterbab14af3b2015-10-08 10:43:10 +02001228 btrfs_err_rl_in_rcu(fs_info,
1229 "unable to fixup (regular) error at logical %llu on dev %s",
Geert Uytterhoevenc1c9ff72013-08-20 13:20:07 +02001230 logical, rcu_str_deref(dev->name));
Arne Jansena2de7332011-03-08 14:14:00 +01001231 }
1232
1233out:
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001234 if (sblocks_for_recheck) {
1235 for (mirror_index = 0; mirror_index < BTRFS_MAX_MIRRORS;
1236 mirror_index++) {
1237 struct scrub_block *sblock = sblocks_for_recheck +
1238 mirror_index;
Miao Xieaf8e2d12014-10-23 14:42:50 +08001239 struct scrub_recover *recover;
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001240 int page_index;
1241
Stefan Behrens7a9e9982012-11-02 14:58:04 +01001242 for (page_index = 0; page_index < sblock->page_count;
1243 page_index++) {
1244 sblock->pagev[page_index]->sblock = NULL;
Miao Xieaf8e2d12014-10-23 14:42:50 +08001245 recover = sblock->pagev[page_index]->recover;
1246 if (recover) {
1247 scrub_put_recover(recover);
1248 sblock->pagev[page_index]->recover =
1249 NULL;
1250 }
Stefan Behrens7a9e9982012-11-02 14:58:04 +01001251 scrub_page_put(sblock->pagev[page_index]);
1252 }
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001253 }
1254 kfree(sblocks_for_recheck);
1255 }
1256
1257 return 0;
Arne Jansena2de7332011-03-08 14:14:00 +01001258}
1259
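/*
 * Number of distinct ways a block can be read back for a given profile.
 * For RAID5 a block can be read directly or rebuilt from the remaining
 * data stripes plus P, hence 2; RAID6 additionally allows a rebuild via
 * Q, hence 3. For the mirror-based profiles, each stripe returned by
 * the mapping code is one copy.
 */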
Zhao Lei8e5cfb52015-01-20 15:11:33 +08001260static inline int scrub_nr_raid_mirrors(struct btrfs_bio *bbio)
Miao Xieaf8e2d12014-10-23 14:42:50 +08001261{
Zhao Lei10f11902015-01-20 15:11:43 +08001262 if (bbio->map_type & BTRFS_BLOCK_GROUP_RAID5)
1263 return 2;
1264 else if (bbio->map_type & BTRFS_BLOCK_GROUP_RAID6)
1265 return 3;
1266 else
Miao Xieaf8e2d12014-10-23 14:42:50 +08001267 return (int)bbio->num_stripes;
Miao Xieaf8e2d12014-10-23 14:42:50 +08001268}
1269
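/*
 * Translate a logical address plus mirror number into a stripe index
 * and offset. For RAID5/6, raid_map[] holds the logical start of each
 * stripe (the P/Q slots carry the RAID5_P_STRIPE/RAID6_Q_STRIPE
 * markers) and the matching stripe is found by a range check; for the
 * mirrored profiles, the mirror number is the stripe index and the
 * offset is 0.
 */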
Zhao Lei10f11902015-01-20 15:11:43 +08001270static inline void scrub_stripe_index_and_offset(u64 logical, u64 map_type,
1271 u64 *raid_map,
Miao Xieaf8e2d12014-10-23 14:42:50 +08001272 u64 mapped_length,
1273 int nstripes, int mirror,
1274 int *stripe_index,
1275 u64 *stripe_offset)
1276{
1277 int i;
1278
Zhao Leiffe2d202015-01-20 15:11:44 +08001279 if (map_type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
Miao Xieaf8e2d12014-10-23 14:42:50 +08001280 /* RAID5/6 */
1281 for (i = 0; i < nstripes; i++) {
1282 if (raid_map[i] == RAID6_Q_STRIPE ||
1283 raid_map[i] == RAID5_P_STRIPE)
1284 continue;
1285
1286 if (logical >= raid_map[i] &&
1287 logical < raid_map[i] + mapped_length)
1288 break;
1289 }
1290
1291 *stripe_index = i;
1292 *stripe_offset = logical - raid_map[i];
1293 } else {
1294 /* The other RAID type */
1295 *stripe_index = mirror;
1296 *stripe_offset = 0;
1297 }
1298}
1299
Zhao Leibe50a8d2015-01-20 15:11:42 +08001300static int scrub_setup_recheck_block(struct scrub_block *original_sblock,
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001301 struct scrub_block *sblocks_for_recheck)
Arne Jansena2de7332011-03-08 14:14:00 +01001302{
Zhao Leibe50a8d2015-01-20 15:11:42 +08001303 struct scrub_ctx *sctx = original_sblock->sctx;
1304 struct btrfs_fs_info *fs_info = sctx->dev_root->fs_info;
1305 u64 length = original_sblock->page_count * PAGE_SIZE;
1306 u64 logical = original_sblock->pagev[0]->logical;
Zhao Lei4734b7e2015-08-19 22:39:18 +08001307 u64 generation = original_sblock->pagev[0]->generation;
1308 u64 flags = original_sblock->pagev[0]->flags;
1309 u64 have_csum = original_sblock->pagev[0]->have_csum;
Miao Xieaf8e2d12014-10-23 14:42:50 +08001310 struct scrub_recover *recover;
1311 struct btrfs_bio *bbio;
Miao Xieaf8e2d12014-10-23 14:42:50 +08001312 u64 sublen;
1313 u64 mapped_length;
1314 u64 stripe_offset;
1315 int stripe_index;
Zhao Leibe50a8d2015-01-20 15:11:42 +08001316 int page_index = 0;
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001317 int mirror_index;
Miao Xieaf8e2d12014-10-23 14:42:50 +08001318 int nmirrors;
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001319 int ret;
1320
1321 /*
Zhao Lei57019342015-01-20 15:11:45 +08001322 * note: the two members refs and outstanding_pages
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001323 * are not used (and not set) in the blocks that are used for
1324 * the recheck procedure
1325 */
1326
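	/*
	 * Walk the block one PAGE_SIZE chunk at a time: map each chunk to
	 * all of its mirrors and allocate one scrub_page per mirror, all
	 * sharing a refcounted scrub_recover that keeps the underlying
	 * bbio around until the last page drops it.
	 */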
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001327 while (length > 0) {
Miao Xieaf8e2d12014-10-23 14:42:50 +08001328 sublen = min_t(u64, length, PAGE_SIZE);
1329 mapped_length = sublen;
1330 bbio = NULL;
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001331
1332 /*
1333 * with a length of PAGE_SIZE, each returned stripe
1334 * represents one mirror
1335 */
Miao Xieaf8e2d12014-10-23 14:42:50 +08001336 ret = btrfs_map_sblock(fs_info, REQ_GET_READ_MIRRORS, logical,
Zhao Lei8e5cfb52015-01-20 15:11:33 +08001337 &mapped_length, &bbio, 0, 1);
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001338 if (ret || !bbio || mapped_length < sublen) {
Zhao Lei6e9606d2015-01-20 15:11:34 +08001339 btrfs_put_bbio(bbio);
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001340 return -EIO;
1341 }
1342
Miao Xieaf8e2d12014-10-23 14:42:50 +08001343 recover = kzalloc(sizeof(struct scrub_recover), GFP_NOFS);
1344 if (!recover) {
Zhao Lei6e9606d2015-01-20 15:11:34 +08001345 btrfs_put_bbio(bbio);
Miao Xieaf8e2d12014-10-23 14:42:50 +08001346 return -ENOMEM;
1347 }
1348
1349 atomic_set(&recover->refs, 1);
1350 recover->bbio = bbio;
Miao Xieaf8e2d12014-10-23 14:42:50 +08001351 recover->map_length = mapped_length;
1352
Ashish Samant24731142016-04-29 18:33:59 -07001353 BUG_ON(page_index >= SCRUB_MAX_PAGES_PER_BLOCK);
Miao Xieaf8e2d12014-10-23 14:42:50 +08001354
Zhao Leibe50a8d2015-01-20 15:11:42 +08001355 nmirrors = min(scrub_nr_raid_mirrors(bbio), BTRFS_MAX_MIRRORS);
Zhao Lei10f11902015-01-20 15:11:43 +08001356
Miao Xieaf8e2d12014-10-23 14:42:50 +08001357 for (mirror_index = 0; mirror_index < nmirrors;
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001358 mirror_index++) {
1359 struct scrub_block *sblock;
1360 struct scrub_page *page;
1361
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001362 sblock = sblocks_for_recheck + mirror_index;
Stefan Behrens7a9e9982012-11-02 14:58:04 +01001363 sblock->sctx = sctx;
Zhao Lei4734b7e2015-08-19 22:39:18 +08001364
Stefan Behrens7a9e9982012-11-02 14:58:04 +01001365 page = kzalloc(sizeof(*page), GFP_NOFS);
1366 if (!page) {
1367leave_nomem:
Stefan Behrensd9d181c2012-11-02 09:58:09 +01001368 spin_lock(&sctx->stat_lock);
1369 sctx->stat.malloc_errors++;
1370 spin_unlock(&sctx->stat_lock);
Miao Xieaf8e2d12014-10-23 14:42:50 +08001371 scrub_put_recover(recover);
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001372 return -ENOMEM;
1373 }
Stefan Behrens7a9e9982012-11-02 14:58:04 +01001374 scrub_page_get(page);
1375 sblock->pagev[page_index] = page;
Zhao Lei4734b7e2015-08-19 22:39:18 +08001376 page->sblock = sblock;
1377 page->flags = flags;
1378 page->generation = generation;
Stefan Behrens7a9e9982012-11-02 14:58:04 +01001379 page->logical = logical;
Zhao Lei4734b7e2015-08-19 22:39:18 +08001380 page->have_csum = have_csum;
1381 if (have_csum)
1382 memcpy(page->csum,
1383 original_sblock->pagev[0]->csum,
1384 sctx->csum_size);
Miao Xieaf8e2d12014-10-23 14:42:50 +08001385
Zhao Lei10f11902015-01-20 15:11:43 +08001386 scrub_stripe_index_and_offset(logical,
1387 bbio->map_type,
1388 bbio->raid_map,
Miao Xieaf8e2d12014-10-23 14:42:50 +08001389 mapped_length,
Zhao Leie34c3302015-01-20 15:11:31 +08001390 bbio->num_stripes -
1391 bbio->num_tgtdevs,
Miao Xieaf8e2d12014-10-23 14:42:50 +08001392 mirror_index,
1393 &stripe_index,
1394 &stripe_offset);
1395 page->physical = bbio->stripes[stripe_index].physical +
1396 stripe_offset;
1397 page->dev = bbio->stripes[stripe_index].dev;
1398
Stefan Behrensff023aa2012-11-06 11:43:11 +01001399 BUG_ON(page_index >= original_sblock->page_count);
1400 page->physical_for_dev_replace =
1401 original_sblock->pagev[page_index]->
1402 physical_for_dev_replace;
Stefan Behrens7a9e9982012-11-02 14:58:04 +01001403 /* for missing devices, dev->bdev is NULL */
Stefan Behrens7a9e9982012-11-02 14:58:04 +01001404 page->mirror_num = mirror_index + 1;
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001405 sblock->page_count++;
Stefan Behrens7a9e9982012-11-02 14:58:04 +01001406 page->page = alloc_page(GFP_NOFS);
1407 if (!page->page)
1408 goto leave_nomem;
Miao Xieaf8e2d12014-10-23 14:42:50 +08001409
1410 scrub_get_recover(recover);
1411 page->recover = recover;
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001412 }
Miao Xieaf8e2d12014-10-23 14:42:50 +08001413 scrub_put_recover(recover);
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001414 length -= sublen;
1415 logical += sublen;
1416 page_index++;
1417 }
1418
1419 return 0;
1420}
1421
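/*
 * Helpers to submit a raid56 recovery bio synchronously: the endio
 * handler records the error and fires the completion that
 * scrub_submit_raid56_bio_wait() sleeps on.
 */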
Miao Xieaf8e2d12014-10-23 14:42:50 +08001422struct scrub_bio_ret {
1423 struct completion event;
1424 int error;
1425};
1426
Christoph Hellwig4246a0b2015-07-20 15:29:37 +02001427static void scrub_bio_wait_endio(struct bio *bio)
Miao Xieaf8e2d12014-10-23 14:42:50 +08001428{
1429 struct scrub_bio_ret *ret = bio->bi_private;
1430
Christoph Hellwig4246a0b2015-07-20 15:29:37 +02001431 ret->error = bio->bi_error;
Miao Xieaf8e2d12014-10-23 14:42:50 +08001432 complete(&ret->event);
1433}
1434
1435static inline int scrub_is_page_on_raid56(struct scrub_page *page)
1436{
Zhao Lei10f11902015-01-20 15:11:43 +08001437 return page->recover &&
Zhao Leiffe2d202015-01-20 15:11:44 +08001438 (page->recover->bbio->map_type & BTRFS_BLOCK_GROUP_RAID56_MASK);
Miao Xieaf8e2d12014-10-23 14:42:50 +08001439}
1440
1441static int scrub_submit_raid56_bio_wait(struct btrfs_fs_info *fs_info,
1442 struct bio *bio,
1443 struct scrub_page *page)
1444{
1445 struct scrub_bio_ret done;
1446 int ret;
1447
1448 init_completion(&done.event);
1449 done.error = 0;
1450 bio->bi_iter.bi_sector = page->logical >> 9;
1451 bio->bi_private = &done;
1452 bio->bi_end_io = scrub_bio_wait_endio;
1453
1454 ret = raid56_parity_recover(fs_info->fs_root, bio, page->recover->bbio,
Miao Xieaf8e2d12014-10-23 14:42:50 +08001455 page->recover->map_length,
Miao Xie42452152014-11-25 16:39:28 +08001456 page->mirror_num, 0);
Miao Xieaf8e2d12014-10-23 14:42:50 +08001457 if (ret)
1458 return ret;
1459
1460 wait_for_completion(&done.event);
1461 if (done.error)
1462 return -EIO;
1463
1464 return 0;
1465}
1466
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001467/*
1468 * this function will check the on-disk data for checksum errors, header
1469 * errors and read I/O errors. If any I/O errors happen, the exact pages
1470 * that are errored are marked as being bad. The goal is to enable scrub
1471 * to take those pages that are not errored from all the mirrors so that
1472 * the pages that are errored in the just-handled mirror can be repaired.
1473 */
Stefan Behrens34f5c8e2012-11-02 16:16:26 +01001474static void scrub_recheck_block(struct btrfs_fs_info *fs_info,
Zhao Leiaffe4a52015-08-24 21:32:06 +08001475 struct scrub_block *sblock,
1476 int retry_failed_mirror)
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001477{
1478 int page_num;
1479
1480 sblock->no_io_error_seen = 1;
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001481
1482 for (page_num = 0; page_num < sblock->page_count; page_num++) {
1483 struct bio *bio;
Stefan Behrens7a9e9982012-11-02 14:58:04 +01001484 struct scrub_page *page = sblock->pagev[page_num];
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001485
Stefan Behrens442a4f62012-05-25 16:06:08 +02001486 if (page->dev->bdev == NULL) {
Stefan Behrensea9947b2012-05-04 15:16:07 -04001487 page->io_error = 1;
1488 sblock->no_io_error_seen = 0;
1489 continue;
1490 }
1491
Stefan Behrens7a9e9982012-11-02 14:58:04 +01001492 WARN_ON(!page->page);
Chris Mason9be33952013-05-17 18:30:14 -04001493 bio = btrfs_io_bio_alloc(GFP_NOFS, 1);
Stefan Behrens34f5c8e2012-11-02 16:16:26 +01001494 if (!bio) {
1495 page->io_error = 1;
1496 sblock->no_io_error_seen = 0;
1497 continue;
1498 }
Stefan Behrens442a4f62012-05-25 16:06:08 +02001499 bio->bi_bdev = page->dev->bdev;
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001500
Stefan Behrens34f5c8e2012-11-02 16:16:26 +01001501 bio_add_page(bio, page->page, PAGE_SIZE, 0);
Miao Xieaf8e2d12014-10-23 14:42:50 +08001502 if (!retry_failed_mirror && scrub_is_page_on_raid56(page)) {
1503 if (scrub_submit_raid56_bio_wait(fs_info, bio, page))
1504 sblock->no_io_error_seen = 0;
1505 } else {
1506 bio->bi_iter.bi_sector = page->physical >> 9;
Mike Christie37226b22016-06-05 14:31:52 -05001507 bio_set_op_attrs(bio, REQ_OP_READ, 0);
Miao Xieaf8e2d12014-10-23 14:42:50 +08001508
Mike Christie4e49ea42016-06-05 14:31:41 -05001509 if (btrfsic_submit_bio_wait(bio))
Miao Xieaf8e2d12014-10-23 14:42:50 +08001510 sblock->no_io_error_seen = 0;
1511 }
Kent Overstreet33879d42013-11-23 22:33:32 -08001512
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001513 bio_put(bio);
1514 }
1515
1516 if (sblock->no_io_error_seen)
Zhao Leiba7cf982015-08-24 21:18:02 +08001517 scrub_recheck_block_checksum(sblock);
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001518}
1519
Miao Xie17a9be22014-07-24 11:37:08 +08001520static inline int scrub_check_fsid(u8 fsid[],
1521 struct scrub_page *spage)
1522{
1523 struct btrfs_fs_devices *fs_devices = spage->dev->fs_devices;
1524 int ret;
1525
1526 ret = memcmp(fsid, fs_devices->fsid, BTRFS_UUID_SIZE);
1527 return !ret;
1528}
1529
Zhao Leiba7cf982015-08-24 21:18:02 +08001530static void scrub_recheck_block_checksum(struct scrub_block *sblock)
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001531{
Zhao Leiba7cf982015-08-24 21:18:02 +08001532 sblock->header_error = 0;
1533 sblock->checksum_error = 0;
1534 sblock->generation_error = 0;
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001535
Zhao Leiba7cf982015-08-24 21:18:02 +08001536 if (sblock->pagev[0]->flags & BTRFS_EXTENT_FLAG_DATA)
1537 scrub_checksum_data(sblock);
1538 else
1539 scrub_checksum_tree_block(sblock);
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001540}
1541
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001542static int scrub_repair_block_from_good_copy(struct scrub_block *sblock_bad,
Zhao Lei114ab502015-01-20 15:11:36 +08001543 struct scrub_block *sblock_good)
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001544{
1545 int page_num;
1546 int ret = 0;
1547
1548 for (page_num = 0; page_num < sblock_bad->page_count; page_num++) {
1549 int ret_sub;
1550
1551 ret_sub = scrub_repair_page_from_good_copy(sblock_bad,
1552 sblock_good,
Zhao Lei114ab502015-01-20 15:11:36 +08001553 page_num, 1);
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001554 if (ret_sub)
1555 ret = ret_sub;
1556 }
1557
1558 return ret;
1559}
1560
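/*
 * Overwrite one page of the bad mirror, on disk, with the content of
 * the corresponding page of the good mirror. With force_write, the
 * page is rewritten even if it did not show an error itself (used when
 * the whole block is copied from a good mirror); otherwise only pages
 * of blocks with header/checksum errors, or pages with their own I/O
 * error, are written.
 */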
1561static int scrub_repair_page_from_good_copy(struct scrub_block *sblock_bad,
1562 struct scrub_block *sblock_good,
1563 int page_num, int force_write)
1564{
Stefan Behrens7a9e9982012-11-02 14:58:04 +01001565 struct scrub_page *page_bad = sblock_bad->pagev[page_num];
1566 struct scrub_page *page_good = sblock_good->pagev[page_num];
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001567
Stefan Behrens7a9e9982012-11-02 14:58:04 +01001568 BUG_ON(page_bad->page == NULL);
1569 BUG_ON(page_good->page == NULL);
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001570 if (force_write || sblock_bad->header_error ||
1571 sblock_bad->checksum_error || page_bad->io_error) {
1572 struct bio *bio;
1573 int ret;
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001574
Stefan Behrensff023aa2012-11-06 11:43:11 +01001575 if (!page_bad->dev->bdev) {
David Sterba94647322015-10-08 11:01:36 +02001576 btrfs_warn_rl(sblock_bad->sctx->dev_root->fs_info,
Frank Holtonefe120a2013-12-20 11:37:06 -05001577				"scrub_repair_page_from_good_copy(bdev == NULL) is unexpected");
Stefan Behrensff023aa2012-11-06 11:43:11 +01001579 return -EIO;
1580 }
1581
Chris Mason9be33952013-05-17 18:30:14 -04001582 bio = btrfs_io_bio_alloc(GFP_NOFS, 1);
Tsutomu Itohe627ee72012-04-12 16:03:56 -04001583 if (!bio)
1584 return -EIO;
Stefan Behrens442a4f62012-05-25 16:06:08 +02001585 bio->bi_bdev = page_bad->dev->bdev;
Kent Overstreet4f024f32013-10-11 15:44:27 -07001586 bio->bi_iter.bi_sector = page_bad->physical >> 9;
Mike Christie37226b22016-06-05 14:31:52 -05001587 bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001588
1589 ret = bio_add_page(bio, page_good->page, PAGE_SIZE, 0);
1590 if (PAGE_SIZE != ret) {
1591 bio_put(bio);
1592 return -EIO;
1593 }
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001594
Mike Christie4e49ea42016-06-05 14:31:41 -05001595 if (btrfsic_submit_bio_wait(bio)) {
Stefan Behrens442a4f62012-05-25 16:06:08 +02001596 btrfs_dev_stat_inc_and_print(page_bad->dev,
1597 BTRFS_DEV_STAT_WRITE_ERRS);
Stefan Behrensff023aa2012-11-06 11:43:11 +01001598 btrfs_dev_replace_stats_inc(
1599 &sblock_bad->sctx->dev_root->fs_info->
1600 dev_replace.num_write_errors);
Stefan Behrens442a4f62012-05-25 16:06:08 +02001601 bio_put(bio);
1602 return -EIO;
1603 }
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001604 bio_put(bio);
1605 }
1606
1607 return 0;
1608}
1609
Stefan Behrensff023aa2012-11-06 11:43:11 +01001610static void scrub_write_block_to_dev_replace(struct scrub_block *sblock)
1611{
1612 int page_num;
1613
Miao Xie5a6ac9e2014-11-06 17:20:58 +08001614 /*
1615 * This block is used for the check of the parity on the source device,
1616 * so the data needn't be written into the destination device.
1617 */
1618 if (sblock->sparity)
1619 return;
1620
Stefan Behrensff023aa2012-11-06 11:43:11 +01001621 for (page_num = 0; page_num < sblock->page_count; page_num++) {
1622 int ret;
1623
1624 ret = scrub_write_page_to_dev_replace(sblock, page_num);
1625 if (ret)
1626 btrfs_dev_replace_stats_inc(
1627 &sblock->sctx->dev_root->fs_info->dev_replace.
1628 num_write_errors);
1629 }
1630}
1631
1632static int scrub_write_page_to_dev_replace(struct scrub_block *sblock,
1633 int page_num)
1634{
1635 struct scrub_page *spage = sblock->pagev[page_num];
1636
1637 BUG_ON(spage->page == NULL);
1638 if (spage->io_error) {
1639 void *mapped_buffer = kmap_atomic(spage->page);
1640
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03001641 memset(mapped_buffer, 0, PAGE_SIZE);
Stefan Behrensff023aa2012-11-06 11:43:11 +01001642 flush_dcache_page(spage->page);
1643 kunmap_atomic(mapped_buffer);
1644 }
1645 return scrub_add_page_to_wr_bio(sblock->sctx, spage);
1646}
1647
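/*
 * Queue a page for writing to the dev-replace target. Pages are
 * batched into wr_curr_bio as long as they are physically and
 * logically contiguous; the bio is submitted once it is full
 * (pages_per_wr_bio) or when a discontiguous page arrives.
 */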
1648static int scrub_add_page_to_wr_bio(struct scrub_ctx *sctx,
1649 struct scrub_page *spage)
1650{
1651 struct scrub_wr_ctx *wr_ctx = &sctx->wr_ctx;
1652 struct scrub_bio *sbio;
1653 int ret;
1654
1655 mutex_lock(&wr_ctx->wr_lock);
1656again:
1657 if (!wr_ctx->wr_curr_bio) {
1658 wr_ctx->wr_curr_bio = kzalloc(sizeof(*wr_ctx->wr_curr_bio),
David Sterba58c4e172016-02-11 10:49:42 +01001659 GFP_KERNEL);
Stefan Behrensff023aa2012-11-06 11:43:11 +01001660 if (!wr_ctx->wr_curr_bio) {
1661 mutex_unlock(&wr_ctx->wr_lock);
1662 return -ENOMEM;
1663 }
1664 wr_ctx->wr_curr_bio->sctx = sctx;
1665 wr_ctx->wr_curr_bio->page_count = 0;
1666 }
1667 sbio = wr_ctx->wr_curr_bio;
1668 if (sbio->page_count == 0) {
1669 struct bio *bio;
1670
1671 sbio->physical = spage->physical_for_dev_replace;
1672 sbio->logical = spage->logical;
1673 sbio->dev = wr_ctx->tgtdev;
1674 bio = sbio->bio;
1675 if (!bio) {
David Sterba58c4e172016-02-11 10:49:42 +01001676 bio = btrfs_io_bio_alloc(GFP_KERNEL,
1677 wr_ctx->pages_per_wr_bio);
Stefan Behrensff023aa2012-11-06 11:43:11 +01001678 if (!bio) {
1679 mutex_unlock(&wr_ctx->wr_lock);
1680 return -ENOMEM;
1681 }
1682 sbio->bio = bio;
1683 }
1684
1685 bio->bi_private = sbio;
1686 bio->bi_end_io = scrub_wr_bio_end_io;
1687 bio->bi_bdev = sbio->dev->bdev;
Kent Overstreet4f024f32013-10-11 15:44:27 -07001688 bio->bi_iter.bi_sector = sbio->physical >> 9;
Mike Christie37226b22016-06-05 14:31:52 -05001689 bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
Stefan Behrensff023aa2012-11-06 11:43:11 +01001690 sbio->err = 0;
1691 } else if (sbio->physical + sbio->page_count * PAGE_SIZE !=
1692 spage->physical_for_dev_replace ||
1693 sbio->logical + sbio->page_count * PAGE_SIZE !=
1694 spage->logical) {
1695 scrub_wr_submit(sctx);
1696 goto again;
1697 }
1698
1699 ret = bio_add_page(sbio->bio, spage->page, PAGE_SIZE, 0);
1700 if (ret != PAGE_SIZE) {
1701 if (sbio->page_count < 1) {
1702 bio_put(sbio->bio);
1703 sbio->bio = NULL;
1704 mutex_unlock(&wr_ctx->wr_lock);
1705 return -EIO;
1706 }
1707 scrub_wr_submit(sctx);
1708 goto again;
1709 }
1710
1711 sbio->pagev[sbio->page_count] = spage;
1712 scrub_page_get(spage);
1713 sbio->page_count++;
1714 if (sbio->page_count == wr_ctx->pages_per_wr_bio)
1715 scrub_wr_submit(sctx);
1716 mutex_unlock(&wr_ctx->wr_lock);
1717
1718 return 0;
1719}
1720
1721static void scrub_wr_submit(struct scrub_ctx *sctx)
1722{
1723 struct scrub_wr_ctx *wr_ctx = &sctx->wr_ctx;
1724 struct scrub_bio *sbio;
1725
1726 if (!wr_ctx->wr_curr_bio)
1727 return;
1728
1729 sbio = wr_ctx->wr_curr_bio;
1730 wr_ctx->wr_curr_bio = NULL;
1731 WARN_ON(!sbio->bio->bi_bdev);
1732 scrub_pending_bio_inc(sctx);
1733 /* process all writes in a single worker thread. Then the block layer
1734	 * orders the requests before sending them to the driver, which
1735	 * doubled the write performance on spinning disks when measured
1736	 * with Linux 3.5. */
Mike Christie4e49ea42016-06-05 14:31:41 -05001737 btrfsic_submit_bio(sbio->bio);
Stefan Behrensff023aa2012-11-06 11:43:11 +01001738}
1739
Christoph Hellwig4246a0b2015-07-20 15:29:37 +02001740static void scrub_wr_bio_end_io(struct bio *bio)
Stefan Behrensff023aa2012-11-06 11:43:11 +01001741{
1742 struct scrub_bio *sbio = bio->bi_private;
1743 struct btrfs_fs_info *fs_info = sbio->dev->dev_root->fs_info;
1744
Christoph Hellwig4246a0b2015-07-20 15:29:37 +02001745 sbio->err = bio->bi_error;
Stefan Behrensff023aa2012-11-06 11:43:11 +01001746 sbio->bio = bio;
1747
Liu Bo9e0af232014-08-15 23:36:53 +08001748 btrfs_init_work(&sbio->work, btrfs_scrubwrc_helper,
1749 scrub_wr_bio_end_io_worker, NULL, NULL);
Qu Wenruo0339ef22014-02-28 10:46:17 +08001750 btrfs_queue_work(fs_info->scrub_wr_completion_workers, &sbio->work);
Stefan Behrensff023aa2012-11-06 11:43:11 +01001751}
1752
1753static void scrub_wr_bio_end_io_worker(struct btrfs_work *work)
1754{
1755 struct scrub_bio *sbio = container_of(work, struct scrub_bio, work);
1756 struct scrub_ctx *sctx = sbio->sctx;
1757 int i;
1758
1759 WARN_ON(sbio->page_count > SCRUB_PAGES_PER_WR_BIO);
1760 if (sbio->err) {
1761 struct btrfs_dev_replace *dev_replace =
1762 &sbio->sctx->dev_root->fs_info->dev_replace;
1763
1764 for (i = 0; i < sbio->page_count; i++) {
1765 struct scrub_page *spage = sbio->pagev[i];
1766
1767 spage->io_error = 1;
1768 btrfs_dev_replace_stats_inc(&dev_replace->
1769 num_write_errors);
1770 }
1771 }
1772
1773 for (i = 0; i < sbio->page_count; i++)
1774 scrub_page_put(sbio->pagev[i]);
1775
1776 bio_put(sbio->bio);
1777 kfree(sbio);
1778 scrub_pending_bio_dec(sctx);
1779}
1780
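/*
 * Verify a block according to the extent flags of its first page
 * (data, tree block or super block) and kick off the repair machinery
 * on failure; super block errors are only counted, they do not trigger
 * repair here.
 */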
1781static int scrub_checksum(struct scrub_block *sblock)
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001782{
1783 u64 flags;
1784 int ret;
1785
Zhao Leiba7cf982015-08-24 21:18:02 +08001786 /*
1787 * No need to initialize these stats currently,
1788	 * because this function only uses the return value
1789	 * instead of these stats values.
1790 *
1791 * Todo:
1792 * always use stats
1793 */
1794 sblock->header_error = 0;
1795 sblock->generation_error = 0;
1796 sblock->checksum_error = 0;
1797
Stefan Behrens7a9e9982012-11-02 14:58:04 +01001798 WARN_ON(sblock->page_count < 1);
1799 flags = sblock->pagev[0]->flags;
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001800 ret = 0;
1801 if (flags & BTRFS_EXTENT_FLAG_DATA)
1802 ret = scrub_checksum_data(sblock);
1803 else if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK)
1804 ret = scrub_checksum_tree_block(sblock);
1805 else if (flags & BTRFS_EXTENT_FLAG_SUPER)
1806 (void)scrub_checksum_super(sblock);
1807 else
1808 WARN_ON(1);
1809 if (ret)
1810 scrub_handle_errored_block(sblock);
Stefan Behrensff023aa2012-11-06 11:43:11 +01001811
1812 return ret;
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001813}
1814
1815static int scrub_checksum_data(struct scrub_block *sblock)
1816{
Stefan Behrensd9d181c2012-11-02 09:58:09 +01001817 struct scrub_ctx *sctx = sblock->sctx;
Arne Jansena2de7332011-03-08 14:14:00 +01001818 u8 csum[BTRFS_CSUM_SIZE];
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001819 u8 *on_disk_csum;
1820 struct page *page;
1821 void *buffer;
Arne Jansena2de7332011-03-08 14:14:00 +01001822 u32 crc = ~(u32)0;
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001823 u64 len;
1824 int index;
Arne Jansena2de7332011-03-08 14:14:00 +01001825
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001826 BUG_ON(sblock->page_count < 1);
Stefan Behrens7a9e9982012-11-02 14:58:04 +01001827 if (!sblock->pagev[0]->have_csum)
Arne Jansena2de7332011-03-08 14:14:00 +01001828 return 0;
1829
Stefan Behrens7a9e9982012-11-02 14:58:04 +01001830 on_disk_csum = sblock->pagev[0]->csum;
1831 page = sblock->pagev[0]->page;
Linus Torvalds9613beb2012-03-30 12:44:29 -07001832 buffer = kmap_atomic(page);
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001833
Stefan Behrensd9d181c2012-11-02 09:58:09 +01001834 len = sctx->sectorsize;
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001835 index = 0;
1836 for (;;) {
1837 u64 l = min_t(u64, len, PAGE_SIZE);
1838
Liu Bob0496682013-03-14 14:57:45 +00001839 crc = btrfs_csum_data(buffer, crc, l);
Linus Torvalds9613beb2012-03-30 12:44:29 -07001840 kunmap_atomic(buffer);
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001841 len -= l;
1842 if (len == 0)
1843 break;
1844 index++;
1845 BUG_ON(index >= sblock->page_count);
Stefan Behrens7a9e9982012-11-02 14:58:04 +01001846 BUG_ON(!sblock->pagev[index]->page);
1847 page = sblock->pagev[index]->page;
Linus Torvalds9613beb2012-03-30 12:44:29 -07001848 buffer = kmap_atomic(page);
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001849 }
1850
Arne Jansena2de7332011-03-08 14:14:00 +01001851 btrfs_csum_final(crc, csum);
Stefan Behrensd9d181c2012-11-02 09:58:09 +01001852 if (memcmp(csum, on_disk_csum, sctx->csum_size))
Zhao Leiba7cf982015-08-24 21:18:02 +08001853 sblock->checksum_error = 1;
Arne Jansena2de7332011-03-08 14:14:00 +01001854
Zhao Leiba7cf982015-08-24 21:18:02 +08001855 return sblock->checksum_error;
Arne Jansena2de7332011-03-08 14:14:00 +01001856}
1857
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001858static int scrub_checksum_tree_block(struct scrub_block *sblock)
Arne Jansena2de7332011-03-08 14:14:00 +01001859{
Stefan Behrensd9d181c2012-11-02 09:58:09 +01001860 struct scrub_ctx *sctx = sblock->sctx;
Arne Jansena2de7332011-03-08 14:14:00 +01001861 struct btrfs_header *h;
Stefan Behrensa36cf8b2012-11-02 13:26:57 +01001862 struct btrfs_root *root = sctx->dev_root;
Arne Jansena2de7332011-03-08 14:14:00 +01001863 struct btrfs_fs_info *fs_info = root->fs_info;
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001864 u8 calculated_csum[BTRFS_CSUM_SIZE];
1865 u8 on_disk_csum[BTRFS_CSUM_SIZE];
1866 struct page *page;
1867 void *mapped_buffer;
1868 u64 mapped_size;
1869 void *p;
Arne Jansena2de7332011-03-08 14:14:00 +01001870 u32 crc = ~(u32)0;
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001871 u64 len;
1872 int index;
1873
1874 BUG_ON(sblock->page_count < 1);
Stefan Behrens7a9e9982012-11-02 14:58:04 +01001875 page = sblock->pagev[0]->page;
Linus Torvalds9613beb2012-03-30 12:44:29 -07001876 mapped_buffer = kmap_atomic(page);
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001877 h = (struct btrfs_header *)mapped_buffer;
Stefan Behrensd9d181c2012-11-02 09:58:09 +01001878 memcpy(on_disk_csum, h->csum, sctx->csum_size);
Arne Jansena2de7332011-03-08 14:14:00 +01001879
1880 /*
1881 * we don't use the getter functions here, as we
1882 * a) don't have an extent buffer and
1883 * b) the page is already kmapped
1884 */
Qu Wenruo3cae2102013-07-16 11:19:18 +08001885 if (sblock->pagev[0]->logical != btrfs_stack_header_bytenr(h))
Zhao Leiba7cf982015-08-24 21:18:02 +08001886 sblock->header_error = 1;
Arne Jansena2de7332011-03-08 14:14:00 +01001887
Zhao Leiba7cf982015-08-24 21:18:02 +08001888 if (sblock->pagev[0]->generation != btrfs_stack_header_generation(h)) {
1889 sblock->header_error = 1;
1890 sblock->generation_error = 1;
1891 }
Arne Jansena2de7332011-03-08 14:14:00 +01001892
Miao Xie17a9be22014-07-24 11:37:08 +08001893 if (!scrub_check_fsid(h->fsid, sblock->pagev[0]))
Zhao Leiba7cf982015-08-24 21:18:02 +08001894 sblock->header_error = 1;
Arne Jansena2de7332011-03-08 14:14:00 +01001895
1896 if (memcmp(h->chunk_tree_uuid, fs_info->chunk_tree_uuid,
1897 BTRFS_UUID_SIZE))
Zhao Leiba7cf982015-08-24 21:18:02 +08001898 sblock->header_error = 1;
Arne Jansena2de7332011-03-08 14:14:00 +01001899
Stefan Behrensd9d181c2012-11-02 09:58:09 +01001900 len = sctx->nodesize - BTRFS_CSUM_SIZE;
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001901 mapped_size = PAGE_SIZE - BTRFS_CSUM_SIZE;
1902 p = ((u8 *)mapped_buffer) + BTRFS_CSUM_SIZE;
1903 index = 0;
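	/*
	 * The checksummed area can span several pages when the nodesize
	 * is larger than PAGE_SIZE (e.g. a 16K node with 4K pages), so
	 * walk the buffer page by page, remapping at each page boundary.
	 */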
1904 for (;;) {
1905 u64 l = min_t(u64, len, mapped_size);
1906
Liu Bob0496682013-03-14 14:57:45 +00001907 crc = btrfs_csum_data(p, crc, l);
Linus Torvalds9613beb2012-03-30 12:44:29 -07001908 kunmap_atomic(mapped_buffer);
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001909 len -= l;
1910 if (len == 0)
1911 break;
1912 index++;
1913 BUG_ON(index >= sblock->page_count);
Stefan Behrens7a9e9982012-11-02 14:58:04 +01001914 BUG_ON(!sblock->pagev[index]->page);
1915 page = sblock->pagev[index]->page;
Linus Torvalds9613beb2012-03-30 12:44:29 -07001916 mapped_buffer = kmap_atomic(page);
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001917 mapped_size = PAGE_SIZE;
1918 p = mapped_buffer;
1919 }
1920
1921 btrfs_csum_final(crc, calculated_csum);
Stefan Behrensd9d181c2012-11-02 09:58:09 +01001922 if (memcmp(calculated_csum, on_disk_csum, sctx->csum_size))
Zhao Leiba7cf982015-08-24 21:18:02 +08001923 sblock->checksum_error = 1;
Arne Jansena2de7332011-03-08 14:14:00 +01001924
Zhao Leiba7cf982015-08-24 21:18:02 +08001925 return sblock->header_error || sblock->checksum_error;
Arne Jansena2de7332011-03-08 14:14:00 +01001926}
1927
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001928static int scrub_checksum_super(struct scrub_block *sblock)
Arne Jansena2de7332011-03-08 14:14:00 +01001929{
1930 struct btrfs_super_block *s;
Stefan Behrensd9d181c2012-11-02 09:58:09 +01001931 struct scrub_ctx *sctx = sblock->sctx;
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001932 u8 calculated_csum[BTRFS_CSUM_SIZE];
1933 u8 on_disk_csum[BTRFS_CSUM_SIZE];
1934 struct page *page;
1935 void *mapped_buffer;
1936 u64 mapped_size;
1937 void *p;
Arne Jansena2de7332011-03-08 14:14:00 +01001938 u32 crc = ~(u32)0;
Stefan Behrens442a4f62012-05-25 16:06:08 +02001939 int fail_gen = 0;
1940 int fail_cor = 0;
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001941 u64 len;
1942 int index;
Arne Jansena2de7332011-03-08 14:14:00 +01001943
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001944 BUG_ON(sblock->page_count < 1);
Stefan Behrens7a9e9982012-11-02 14:58:04 +01001945 page = sblock->pagev[0]->page;
Linus Torvalds9613beb2012-03-30 12:44:29 -07001946 mapped_buffer = kmap_atomic(page);
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001947 s = (struct btrfs_super_block *)mapped_buffer;
Stefan Behrensd9d181c2012-11-02 09:58:09 +01001948 memcpy(on_disk_csum, s->csum, sctx->csum_size);
Arne Jansena2de7332011-03-08 14:14:00 +01001949
Qu Wenruo3cae2102013-07-16 11:19:18 +08001950 if (sblock->pagev[0]->logical != btrfs_super_bytenr(s))
Stefan Behrens442a4f62012-05-25 16:06:08 +02001951 ++fail_cor;
Arne Jansena2de7332011-03-08 14:14:00 +01001952
Qu Wenruo3cae2102013-07-16 11:19:18 +08001953 if (sblock->pagev[0]->generation != btrfs_super_generation(s))
Stefan Behrens442a4f62012-05-25 16:06:08 +02001954 ++fail_gen;
Arne Jansena2de7332011-03-08 14:14:00 +01001955
Miao Xie17a9be22014-07-24 11:37:08 +08001956 if (!scrub_check_fsid(s->fsid, sblock->pagev[0]))
Stefan Behrens442a4f62012-05-25 16:06:08 +02001957 ++fail_cor;
Arne Jansena2de7332011-03-08 14:14:00 +01001958
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001959 len = BTRFS_SUPER_INFO_SIZE - BTRFS_CSUM_SIZE;
1960 mapped_size = PAGE_SIZE - BTRFS_CSUM_SIZE;
1961 p = ((u8 *)mapped_buffer) + BTRFS_CSUM_SIZE;
1962 index = 0;
1963 for (;;) {
1964 u64 l = min_t(u64, len, mapped_size);
1965
Liu Bob0496682013-03-14 14:57:45 +00001966 crc = btrfs_csum_data(p, crc, l);
Linus Torvalds9613beb2012-03-30 12:44:29 -07001967 kunmap_atomic(mapped_buffer);
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001968 len -= l;
1969 if (len == 0)
1970 break;
1971 index++;
1972 BUG_ON(index >= sblock->page_count);
Stefan Behrens7a9e9982012-11-02 14:58:04 +01001973 BUG_ON(!sblock->pagev[index]->page);
1974 page = sblock->pagev[index]->page;
Linus Torvalds9613beb2012-03-30 12:44:29 -07001975 mapped_buffer = kmap_atomic(page);
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001976 mapped_size = PAGE_SIZE;
1977 p = mapped_buffer;
1978 }
1979
1980 btrfs_csum_final(crc, calculated_csum);
Stefan Behrensd9d181c2012-11-02 09:58:09 +01001981 if (memcmp(calculated_csum, on_disk_csum, sctx->csum_size))
Stefan Behrens442a4f62012-05-25 16:06:08 +02001982 ++fail_cor;
Arne Jansena2de7332011-03-08 14:14:00 +01001983
Stefan Behrens442a4f62012-05-25 16:06:08 +02001984 if (fail_cor + fail_gen) {
Arne Jansena2de7332011-03-08 14:14:00 +01001985 /*
1986 * if we find an error in a super block, we just report it.
1987		 * The super blocks will get rewritten with the next
1988		 * transaction commit anyway.
1989 */
Stefan Behrensd9d181c2012-11-02 09:58:09 +01001990 spin_lock(&sctx->stat_lock);
1991 ++sctx->stat.super_errors;
1992 spin_unlock(&sctx->stat_lock);
Stefan Behrens442a4f62012-05-25 16:06:08 +02001993 if (fail_cor)
Stefan Behrens7a9e9982012-11-02 14:58:04 +01001994 btrfs_dev_stat_inc_and_print(sblock->pagev[0]->dev,
Stefan Behrens442a4f62012-05-25 16:06:08 +02001995 BTRFS_DEV_STAT_CORRUPTION_ERRS);
1996 else
Stefan Behrens7a9e9982012-11-02 14:58:04 +01001997 btrfs_dev_stat_inc_and_print(sblock->pagev[0]->dev,
Stefan Behrens442a4f62012-05-25 16:06:08 +02001998 BTRFS_DEV_STAT_GENERATION_ERRS);
Arne Jansena2de7332011-03-08 14:14:00 +01001999 }
2000
Stefan Behrens442a4f62012-05-25 16:06:08 +02002001 return fail_cor + fail_gen;
Arne Jansena2de7332011-03-08 14:14:00 +01002002}
2003
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002004static void scrub_block_get(struct scrub_block *sblock)
2005{
Zhao Lei57019342015-01-20 15:11:45 +08002006 atomic_inc(&sblock->refs);
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002007}
2008
2009static void scrub_block_put(struct scrub_block *sblock)
2010{
Zhao Lei57019342015-01-20 15:11:45 +08002011 if (atomic_dec_and_test(&sblock->refs)) {
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002012 int i;
2013
Miao Xie5a6ac9e2014-11-06 17:20:58 +08002014 if (sblock->sparity)
2015 scrub_parity_put(sblock->sparity);
2016
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002017 for (i = 0; i < sblock->page_count; i++)
Stefan Behrens7a9e9982012-11-02 14:58:04 +01002018 scrub_page_put(sblock->pagev[i]);
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002019 kfree(sblock);
2020 }
2021}
2022
Stefan Behrens7a9e9982012-11-02 14:58:04 +01002023static void scrub_page_get(struct scrub_page *spage)
2024{
Zhao Lei57019342015-01-20 15:11:45 +08002025 atomic_inc(&spage->refs);
Stefan Behrens7a9e9982012-11-02 14:58:04 +01002026}
2027
2028static void scrub_page_put(struct scrub_page *spage)
2029{
Zhao Lei57019342015-01-20 15:11:45 +08002030 if (atomic_dec_and_test(&spage->refs)) {
Stefan Behrens7a9e9982012-11-02 14:58:04 +01002031 if (spage->page)
2032 __free_page(spage->page);
2033 kfree(spage);
2034 }
2035}
2036
Stefan Behrensd9d181c2012-11-02 09:58:09 +01002037static void scrub_submit(struct scrub_ctx *sctx)
Arne Jansena2de7332011-03-08 14:14:00 +01002038{
2039 struct scrub_bio *sbio;
2040
Stefan Behrensd9d181c2012-11-02 09:58:09 +01002041 if (sctx->curr == -1)
Stefan Behrens1623ede2012-03-27 14:21:26 -04002042 return;
Arne Jansena2de7332011-03-08 14:14:00 +01002043
Stefan Behrensd9d181c2012-11-02 09:58:09 +01002044 sbio = sctx->bios[sctx->curr];
2045 sctx->curr = -1;
Stefan Behrensb6bfebc2012-11-02 16:44:58 +01002046 scrub_pending_bio_inc(sctx);
Mike Christie4e49ea42016-06-05 14:31:41 -05002047 btrfsic_submit_bio(sbio->bio);
Arne Jansena2de7332011-03-08 14:14:00 +01002048}
2049
Stefan Behrensff023aa2012-11-06 11:43:11 +01002050static int scrub_add_page_to_rd_bio(struct scrub_ctx *sctx,
2051 struct scrub_page *spage)
Arne Jansena2de7332011-03-08 14:14:00 +01002052{
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002053 struct scrub_block *sblock = spage->sblock;
Arne Jansena2de7332011-03-08 14:14:00 +01002054 struct scrub_bio *sbio;
Arne Jansen69f4cb52011-11-11 08:17:10 -05002055 int ret;
Arne Jansena2de7332011-03-08 14:14:00 +01002056
2057again:
2058 /*
2059 * grab a fresh bio or wait for one to become available
2060 */
Stefan Behrensd9d181c2012-11-02 09:58:09 +01002061 while (sctx->curr == -1) {
2062 spin_lock(&sctx->list_lock);
2063 sctx->curr = sctx->first_free;
2064 if (sctx->curr != -1) {
2065 sctx->first_free = sctx->bios[sctx->curr]->next_free;
2066 sctx->bios[sctx->curr]->next_free = -1;
2067 sctx->bios[sctx->curr]->page_count = 0;
2068 spin_unlock(&sctx->list_lock);
Arne Jansena2de7332011-03-08 14:14:00 +01002069 } else {
Stefan Behrensd9d181c2012-11-02 09:58:09 +01002070 spin_unlock(&sctx->list_lock);
2071 wait_event(sctx->list_wait, sctx->first_free != -1);
Arne Jansena2de7332011-03-08 14:14:00 +01002072 }
2073 }
Stefan Behrensd9d181c2012-11-02 09:58:09 +01002074 sbio = sctx->bios[sctx->curr];
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002075 if (sbio->page_count == 0) {
Arne Jansen69f4cb52011-11-11 08:17:10 -05002076 struct bio *bio;
2077
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002078 sbio->physical = spage->physical;
2079 sbio->logical = spage->logical;
Stefan Behrensa36cf8b2012-11-02 13:26:57 +01002080 sbio->dev = spage->dev;
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002081 bio = sbio->bio;
2082 if (!bio) {
David Sterba58c4e172016-02-11 10:49:42 +01002083 bio = btrfs_io_bio_alloc(GFP_KERNEL,
2084 sctx->pages_per_rd_bio);
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002085 if (!bio)
2086 return -ENOMEM;
2087 sbio->bio = bio;
2088 }
Arne Jansen69f4cb52011-11-11 08:17:10 -05002089
2090 bio->bi_private = sbio;
2091 bio->bi_end_io = scrub_bio_end_io;
Stefan Behrensa36cf8b2012-11-02 13:26:57 +01002092 bio->bi_bdev = sbio->dev->bdev;
Kent Overstreet4f024f32013-10-11 15:44:27 -07002093 bio->bi_iter.bi_sector = sbio->physical >> 9;
Mike Christie37226b22016-06-05 14:31:52 -05002094 bio_set_op_attrs(bio, REQ_OP_READ, 0);
Arne Jansen69f4cb52011-11-11 08:17:10 -05002095 sbio->err = 0;
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002096 } else if (sbio->physical + sbio->page_count * PAGE_SIZE !=
2097 spage->physical ||
2098 sbio->logical + sbio->page_count * PAGE_SIZE !=
Stefan Behrensa36cf8b2012-11-02 13:26:57 +01002099 spage->logical ||
2100 sbio->dev != spage->dev) {
Stefan Behrensd9d181c2012-11-02 09:58:09 +01002101 scrub_submit(sctx);
Arne Jansen69f4cb52011-11-11 08:17:10 -05002102 goto again;
2103 }
2104
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002105 sbio->pagev[sbio->page_count] = spage;
2106 ret = bio_add_page(sbio->bio, spage->page, PAGE_SIZE, 0);
2107 if (ret != PAGE_SIZE) {
2108 if (sbio->page_count < 1) {
2109 bio_put(sbio->bio);
2110 sbio->bio = NULL;
2111 return -EIO;
2112 }
Stefan Behrensd9d181c2012-11-02 09:58:09 +01002113 scrub_submit(sctx);
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002114 goto again;
Arne Jansena2de7332011-03-08 14:14:00 +01002115 }
Arne Jansen1bc87792011-05-28 21:57:55 +02002116
Stefan Behrensff023aa2012-11-06 11:43:11 +01002117 scrub_block_get(sblock); /* one for the page added to the bio */
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002118 atomic_inc(&sblock->outstanding_pages);
2119 sbio->page_count++;
Stefan Behrensff023aa2012-11-06 11:43:11 +01002120 if (sbio->page_count == sctx->pages_per_rd_bio)
Stefan Behrensd9d181c2012-11-02 09:58:09 +01002121 scrub_submit(sctx);
Arne Jansena2de7332011-03-08 14:14:00 +01002122
2123 return 0;
2124}
2125
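/*
 * Rebuild path for blocks on a missing device in a RAID5/6 dev
 * replace: the rebuilt bio completes in scrub_missing_raid56_end_io(),
 * which defers to scrub_missing_raid56_worker() to recheck the
 * checksum and, on success, write the block to the replace target.
 */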
Linus Torvalds22365972015-09-05 15:14:43 -07002126static void scrub_missing_raid56_end_io(struct bio *bio)
Omar Sandoval73ff61d2015-06-19 11:52:51 -07002127{
2128 struct scrub_block *sblock = bio->bi_private;
2129 struct btrfs_fs_info *fs_info = sblock->sctx->dev_root->fs_info;
2130
Linus Torvalds22365972015-09-05 15:14:43 -07002131 if (bio->bi_error)
Omar Sandoval73ff61d2015-06-19 11:52:51 -07002132 sblock->no_io_error_seen = 0;
2133
Scott Talbert46732722016-05-09 09:14:28 -04002134 bio_put(bio);
2135
Omar Sandoval73ff61d2015-06-19 11:52:51 -07002136 btrfs_queue_work(fs_info->scrub_workers, &sblock->work);
2137}
2138
2139static void scrub_missing_raid56_worker(struct btrfs_work *work)
2140{
2141 struct scrub_block *sblock = container_of(work, struct scrub_block, work);
2142 struct scrub_ctx *sctx = sblock->sctx;
Omar Sandoval73ff61d2015-06-19 11:52:51 -07002143 u64 logical;
2144 struct btrfs_device *dev;
2145
Omar Sandoval73ff61d2015-06-19 11:52:51 -07002146 logical = sblock->pagev[0]->logical;
2147 dev = sblock->pagev[0]->dev;
2148
Zhao Leiaffe4a52015-08-24 21:32:06 +08002149 if (sblock->no_io_error_seen)
Zhao Leiba7cf982015-08-24 21:18:02 +08002150 scrub_recheck_block_checksum(sblock);
Omar Sandoval73ff61d2015-06-19 11:52:51 -07002151
2152 if (!sblock->no_io_error_seen) {
2153 spin_lock(&sctx->stat_lock);
2154 sctx->stat.read_errors++;
2155 spin_unlock(&sctx->stat_lock);
Zhao Leiba7cf982015-08-24 21:18:02 +08002156 btrfs_err_rl_in_rcu(sctx->dev_root->fs_info,
David Sterbab14af3b2015-10-08 10:43:10 +02002157 "IO error rebuilding logical %llu for dev %s",
Omar Sandoval73ff61d2015-06-19 11:52:51 -07002158 logical, rcu_str_deref(dev->name));
2159 } else if (sblock->header_error || sblock->checksum_error) {
2160 spin_lock(&sctx->stat_lock);
2161 sctx->stat.uncorrectable_errors++;
2162 spin_unlock(&sctx->stat_lock);
Zhao Leiba7cf982015-08-24 21:18:02 +08002163 btrfs_err_rl_in_rcu(sctx->dev_root->fs_info,
David Sterbab14af3b2015-10-08 10:43:10 +02002164 "failed to rebuild valid logical %llu for dev %s",
Omar Sandoval73ff61d2015-06-19 11:52:51 -07002165 logical, rcu_str_deref(dev->name));
2166 } else {
2167 scrub_write_block_to_dev_replace(sblock);
2168 }
2169
2170 scrub_block_put(sblock);
2171
2172 if (sctx->is_dev_replace &&
2173 atomic_read(&sctx->wr_ctx.flush_all_writes)) {
2174 mutex_lock(&sctx->wr_ctx.wr_lock);
2175 scrub_wr_submit(sctx);
2176 mutex_unlock(&sctx->wr_ctx.wr_lock);
2177 }
2178
2179 scrub_pending_bio_dec(sctx);
2180}
2181
2182static void scrub_missing_raid56_pages(struct scrub_block *sblock)
2183{
2184 struct scrub_ctx *sctx = sblock->sctx;
2185 struct btrfs_fs_info *fs_info = sctx->dev_root->fs_info;
2186 u64 length = sblock->page_count * PAGE_SIZE;
2187 u64 logical = sblock->pagev[0]->logical;
Zhao Leif1fee652016-05-17 17:37:38 +08002188 struct btrfs_bio *bbio = NULL;
Omar Sandoval73ff61d2015-06-19 11:52:51 -07002189 struct bio *bio;
2190 struct btrfs_raid_bio *rbio;
2191 int ret;
2192 int i;
2193
2194 ret = btrfs_map_sblock(fs_info, REQ_GET_READ_MIRRORS, logical, &length,
2195 &bbio, 0, 1);
2196 if (ret || !bbio || !bbio->raid_map)
2197 goto bbio_out;
2198
2199 if (WARN_ON(!sctx->is_dev_replace ||
2200 !(bbio->map_type & BTRFS_BLOCK_GROUP_RAID56_MASK))) {
2201 /*
2202 * We shouldn't be scrubbing a missing device. Even for dev
2203 * replace, we should only get here for RAID 5/6. We either
2204 * managed to mount something with no mirrors remaining or
2205 * there's a bug in scrub_remap_extent()/btrfs_map_block().
2206 */
2207 goto bbio_out;
2208 }
2209
2210 bio = btrfs_io_bio_alloc(GFP_NOFS, 0);
2211 if (!bio)
2212 goto bbio_out;
2213
2214 bio->bi_iter.bi_sector = logical >> 9;
2215 bio->bi_private = sblock;
2216 bio->bi_end_io = scrub_missing_raid56_end_io;
2217
2218 rbio = raid56_alloc_missing_rbio(sctx->dev_root, bio, bbio, length);
2219 if (!rbio)
2220 goto rbio_out;
2221
2222 for (i = 0; i < sblock->page_count; i++) {
2223 struct scrub_page *spage = sblock->pagev[i];
2224
2225 raid56_add_scrub_pages(rbio, spage->page, spage->logical);
2226 }
2227
2228 btrfs_init_work(&sblock->work, btrfs_scrub_helper,
2229 scrub_missing_raid56_worker, NULL, NULL);
2230 scrub_block_get(sblock);
2231 scrub_pending_bio_inc(sctx);
2232 raid56_submit_missing_rbio(rbio);
2233 return;
2234
2235rbio_out:
2236 bio_put(bio);
2237bbio_out:
2238 btrfs_put_bbio(bbio);
2239 spin_lock(&sctx->stat_lock);
2240 sctx->stat.malloc_errors++;
2241 spin_unlock(&sctx->stat_lock);
2242}
2243
Stefan Behrensd9d181c2012-11-02 09:58:09 +01002244static int scrub_pages(struct scrub_ctx *sctx, u64 logical, u64 len,
Stefan Behrensa36cf8b2012-11-02 13:26:57 +01002245 u64 physical, struct btrfs_device *dev, u64 flags,
Stefan Behrensff023aa2012-11-06 11:43:11 +01002246 u64 gen, int mirror_num, u8 *csum, int force,
2247 u64 physical_for_dev_replace)
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002248{
2249 struct scrub_block *sblock;
2250 int index;
2251
David Sterba58c4e172016-02-11 10:49:42 +01002252 sblock = kzalloc(sizeof(*sblock), GFP_KERNEL);
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002253 if (!sblock) {
Stefan Behrensd9d181c2012-11-02 09:58:09 +01002254 spin_lock(&sctx->stat_lock);
2255 sctx->stat.malloc_errors++;
2256 spin_unlock(&sctx->stat_lock);
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002257 return -ENOMEM;
2258 }
2259
Stefan Behrens7a9e9982012-11-02 14:58:04 +01002260 /* one ref inside this function, plus one for each page added to
2261 * a bio later on */
Zhao Lei57019342015-01-20 15:11:45 +08002262 atomic_set(&sblock->refs, 1);
Stefan Behrensd9d181c2012-11-02 09:58:09 +01002263 sblock->sctx = sctx;
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002264 sblock->no_io_error_seen = 1;
2265
2266 for (index = 0; len > 0; index++) {
Stefan Behrens7a9e9982012-11-02 14:58:04 +01002267 struct scrub_page *spage;
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002268 u64 l = min_t(u64, len, PAGE_SIZE);
2269
David Sterba58c4e172016-02-11 10:49:42 +01002270 spage = kzalloc(sizeof(*spage), GFP_KERNEL);
Stefan Behrens7a9e9982012-11-02 14:58:04 +01002271 if (!spage) {
2272leave_nomem:
Stefan Behrensd9d181c2012-11-02 09:58:09 +01002273 spin_lock(&sctx->stat_lock);
2274 sctx->stat.malloc_errors++;
2275 spin_unlock(&sctx->stat_lock);
Stefan Behrens7a9e9982012-11-02 14:58:04 +01002276 scrub_block_put(sblock);
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002277 return -ENOMEM;
2278 }
Stefan Behrens7a9e9982012-11-02 14:58:04 +01002279 BUG_ON(index >= SCRUB_MAX_PAGES_PER_BLOCK);
2280 scrub_page_get(spage);
2281 sblock->pagev[index] = spage;
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002282 spage->sblock = sblock;
Stefan Behrensa36cf8b2012-11-02 13:26:57 +01002283 spage->dev = dev;
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002284 spage->flags = flags;
2285 spage->generation = gen;
2286 spage->logical = logical;
2287 spage->physical = physical;
Stefan Behrensff023aa2012-11-06 11:43:11 +01002288 spage->physical_for_dev_replace = physical_for_dev_replace;
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002289 spage->mirror_num = mirror_num;
2290 if (csum) {
2291 spage->have_csum = 1;
Stefan Behrensd9d181c2012-11-02 09:58:09 +01002292 memcpy(spage->csum, csum, sctx->csum_size);
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002293 } else {
2294 spage->have_csum = 0;
2295 }
2296 sblock->page_count++;
David Sterba58c4e172016-02-11 10:49:42 +01002297 spage->page = alloc_page(GFP_KERNEL);
Stefan Behrens7a9e9982012-11-02 14:58:04 +01002298 if (!spage->page)
2299 goto leave_nomem;
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002300 len -= l;
2301 logical += l;
2302 physical += l;
Stefan Behrensff023aa2012-11-06 11:43:11 +01002303 physical_for_dev_replace += l;
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002304 }
2305
Stefan Behrens7a9e9982012-11-02 14:58:04 +01002306 WARN_ON(sblock->page_count == 0);
Omar Sandoval73ff61d2015-06-19 11:52:51 -07002307 if (dev->missing) {
2308 /*
2309 * This case should only be hit for RAID 5/6 device replace. See
2310 * the comment in scrub_missing_raid56_pages() for details.
2311 */
2312 scrub_missing_raid56_pages(sblock);
2313 } else {
2314 for (index = 0; index < sblock->page_count; index++) {
2315 struct scrub_page *spage = sblock->pagev[index];
2316 int ret;
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002317
Omar Sandoval73ff61d2015-06-19 11:52:51 -07002318 ret = scrub_add_page_to_rd_bio(sctx, spage);
2319 if (ret) {
2320 scrub_block_put(sblock);
2321 return ret;
2322 }
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002323 }
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002324
Omar Sandoval73ff61d2015-06-19 11:52:51 -07002325 if (force)
2326 scrub_submit(sctx);
2327 }
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002328
2329 /* last one frees, either here or in bio completion for last page */
2330 scrub_block_put(sblock);
2331 return 0;
2332}
2333
Christoph Hellwig4246a0b2015-07-20 15:29:37 +02002334static void scrub_bio_end_io(struct bio *bio)
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002335{
2336 struct scrub_bio *sbio = bio->bi_private;
Stefan Behrensa36cf8b2012-11-02 13:26:57 +01002337 struct btrfs_fs_info *fs_info = sbio->dev->dev_root->fs_info;
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002338
Christoph Hellwig4246a0b2015-07-20 15:29:37 +02002339 sbio->err = bio->bi_error;
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002340 sbio->bio = bio;
2341
Qu Wenruo0339ef22014-02-28 10:46:17 +08002342 btrfs_queue_work(fs_info->scrub_workers, &sbio->work);
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002343}
2344
2345static void scrub_bio_end_io_worker(struct btrfs_work *work)
2346{
2347 struct scrub_bio *sbio = container_of(work, struct scrub_bio, work);
Stefan Behrensd9d181c2012-11-02 09:58:09 +01002348 struct scrub_ctx *sctx = sbio->sctx;
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002349 int i;
2350
Stefan Behrensff023aa2012-11-06 11:43:11 +01002351 BUG_ON(sbio->page_count > SCRUB_PAGES_PER_RD_BIO);
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002352 if (sbio->err) {
2353 for (i = 0; i < sbio->page_count; i++) {
2354 struct scrub_page *spage = sbio->pagev[i];
2355
2356 spage->io_error = 1;
2357 spage->sblock->no_io_error_seen = 0;
2358 }
2359 }
2360
2361 /* now complete the scrub_block items that have all pages completed */
2362 for (i = 0; i < sbio->page_count; i++) {
2363 struct scrub_page *spage = sbio->pagev[i];
2364 struct scrub_block *sblock = spage->sblock;
2365
2366 if (atomic_dec_and_test(&sblock->outstanding_pages))
2367 scrub_block_complete(sblock);
2368 scrub_block_put(sblock);
2369 }
2370
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002371 bio_put(sbio->bio);
2372 sbio->bio = NULL;
Stefan Behrensd9d181c2012-11-02 09:58:09 +01002373 spin_lock(&sctx->list_lock);
2374 sbio->next_free = sctx->first_free;
2375 sctx->first_free = sbio->index;
2376 spin_unlock(&sctx->list_lock);
Stefan Behrensff023aa2012-11-06 11:43:11 +01002377
2378 if (sctx->is_dev_replace &&
2379 atomic_read(&sctx->wr_ctx.flush_all_writes)) {
2380 mutex_lock(&sctx->wr_ctx.wr_lock);
2381 scrub_wr_submit(sctx);
2382 mutex_unlock(&sctx->wr_ctx.wr_lock);
2383 }
2384
Stefan Behrensb6bfebc2012-11-02 16:44:58 +01002385 scrub_pending_bio_dec(sctx);
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002386}
2387
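/*
 * Mark [start, start + len) in a per-stripe sector bitmap. The start
 * is taken modulo stripe_len, so the range may wrap around the end of
 * the stripe: as an illustrative example with made-up numbers, with
 * nsectors = 16, an offset of 14 and a length of 4 sectors, bits 14-15
 * and 0-1 get set.
 */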
Miao Xie5a6ac9e2014-11-06 17:20:58 +08002388static inline void __scrub_mark_bitmap(struct scrub_parity *sparity,
2389 unsigned long *bitmap,
2390 u64 start, u64 len)
2391{
David Sterba9d644a62015-02-20 18:42:11 +01002392 u32 offset;
Miao Xie5a6ac9e2014-11-06 17:20:58 +08002393 int nsectors;
2394 int sectorsize = sparity->sctx->dev_root->sectorsize;
2395
2396 if (len >= sparity->stripe_len) {
2397 bitmap_set(bitmap, 0, sparity->nsectors);
2398 return;
2399 }
2400
2401 start -= sparity->logic_start;
David Sterba47c57132015-02-20 18:43:47 +01002402 start = div_u64_rem(start, sparity->stripe_len, &offset);
Miao Xie5a6ac9e2014-11-06 17:20:58 +08002403 offset /= sectorsize;
2404 nsectors = (int)len / sectorsize;
2405
2406 if (offset + nsectors <= sparity->nsectors) {
2407 bitmap_set(bitmap, offset, nsectors);
2408 return;
2409 }
2410
2411 bitmap_set(bitmap, offset, sparity->nsectors - offset);
2412 bitmap_set(bitmap, 0, nsectors - (sparity->nsectors - offset));
2413}
2414
2415static inline void scrub_parity_mark_sectors_error(struct scrub_parity *sparity,
2416 u64 start, u64 len)
2417{
2418 __scrub_mark_bitmap(sparity, sparity->ebitmap, start, len);
2419}
2420
2421static inline void scrub_parity_mark_sectors_data(struct scrub_parity *sparity,
2422 u64 start, u64 len)
2423{
2424 __scrub_mark_bitmap(sparity, sparity->dbitmap, start, len);
2425}
2426
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002427static void scrub_block_complete(struct scrub_block *sblock)
2428{
Miao Xie5a6ac9e2014-11-06 17:20:58 +08002429 int corrupted = 0;
2430
Stefan Behrensff023aa2012-11-06 11:43:11 +01002431 if (!sblock->no_io_error_seen) {
Miao Xie5a6ac9e2014-11-06 17:20:58 +08002432 corrupted = 1;
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002433 scrub_handle_errored_block(sblock);
Stefan Behrensff023aa2012-11-06 11:43:11 +01002434 } else {
2435 /*
2436		 * if the block has a checksum error, it is written via the
2437		 * repair mechanism in the dev replace case; otherwise it is
2438		 * written directly here in the dev replace case.
2439 */
Miao Xie5a6ac9e2014-11-06 17:20:58 +08002440 corrupted = scrub_checksum(sblock);
2441 if (!corrupted && sblock->sctx->is_dev_replace)
Stefan Behrensff023aa2012-11-06 11:43:11 +01002442 scrub_write_block_to_dev_replace(sblock);
2443 }
Miao Xie5a6ac9e2014-11-06 17:20:58 +08002444
2445 if (sblock->sparity && corrupted && !sblock->data_corrected) {
2446 u64 start = sblock->pagev[0]->logical;
2447 u64 end = sblock->pagev[sblock->page_count - 1]->logical +
2448 PAGE_SIZE;
2449
2450 scrub_parity_mark_sectors_error(sblock->sparity,
2451 start, end - start);
2452 }
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002453}
2454
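/*
 * Look up the data checksum for @logical. csum_list is ordered by
 * bytenr, so sums that end before @logical can be dropped (counted as
 * csum_discards); on a hit, the csum of the matching sector is copied
 * out and 1 is returned, 0 if no csum is known.
 */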
Zhao Lei3b5753e2015-08-24 22:03:02 +08002455static int scrub_find_csum(struct scrub_ctx *sctx, u64 logical, u8 *csum)
Arne Jansena2de7332011-03-08 14:14:00 +01002456{
2457 struct btrfs_ordered_sum *sum = NULL;
Miao Xief51a4a12013-06-19 10:36:09 +08002458 unsigned long index;
Arne Jansena2de7332011-03-08 14:14:00 +01002459 unsigned long num_sectors;
Arne Jansena2de7332011-03-08 14:14:00 +01002460
Stefan Behrensd9d181c2012-11-02 09:58:09 +01002461 while (!list_empty(&sctx->csum_list)) {
2462 sum = list_first_entry(&sctx->csum_list,
Arne Jansena2de7332011-03-08 14:14:00 +01002463 struct btrfs_ordered_sum, list);
2464 if (sum->bytenr > logical)
2465 return 0;
2466 if (sum->bytenr + sum->len > logical)
2467 break;
2468
Stefan Behrensd9d181c2012-11-02 09:58:09 +01002469 ++sctx->stat.csum_discards;
Arne Jansena2de7332011-03-08 14:14:00 +01002470 list_del(&sum->list);
2471 kfree(sum);
2472 sum = NULL;
2473 }
2474 if (!sum)
2475 return 0;
2476
Miao Xief51a4a12013-06-19 10:36:09 +08002477 index = ((u32)(logical - sum->bytenr)) / sctx->sectorsize;
Stefan Behrensd9d181c2012-11-02 09:58:09 +01002478 num_sectors = sum->len / sctx->sectorsize;
Miao Xief51a4a12013-06-19 10:36:09 +08002479 memcpy(csum, sum->sums + index, sctx->csum_size);
2480 if (index == num_sectors - 1) {
Arne Jansena2de7332011-03-08 14:14:00 +01002481 list_del(&sum->list);
2482 kfree(sum);
2483 }
Miao Xief51a4a12013-06-19 10:36:09 +08002484 return 1;
Arne Jansena2de7332011-03-08 14:14:00 +01002485}
2486
2487/* scrub extent tries to collect up to 64 kB for each bio */
Stefan Behrensd9d181c2012-11-02 09:58:09 +01002488static int scrub_extent(struct scrub_ctx *sctx, u64 logical, u64 len,
Stefan Behrensa36cf8b2012-11-02 13:26:57 +01002489 u64 physical, struct btrfs_device *dev, u64 flags,
Stefan Behrensff023aa2012-11-06 11:43:11 +01002490 u64 gen, int mirror_num, u64 physical_for_dev_replace)
Arne Jansena2de7332011-03-08 14:14:00 +01002491{
2492 int ret;
2493 u8 csum[BTRFS_CSUM_SIZE];
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002494 u32 blocksize;
2495
2496 if (flags & BTRFS_EXTENT_FLAG_DATA) {
Stefan Behrensd9d181c2012-11-02 09:58:09 +01002497 blocksize = sctx->sectorsize;
2498 spin_lock(&sctx->stat_lock);
2499 sctx->stat.data_extents_scrubbed++;
2500 sctx->stat.data_bytes_scrubbed += len;
2501 spin_unlock(&sctx->stat_lock);
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002502 } else if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
Stefan Behrensd9d181c2012-11-02 09:58:09 +01002503 blocksize = sctx->nodesize;
2504 spin_lock(&sctx->stat_lock);
2505 sctx->stat.tree_extents_scrubbed++;
2506 sctx->stat.tree_bytes_scrubbed += len;
2507 spin_unlock(&sctx->stat_lock);
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002508 } else {
Stefan Behrensd9d181c2012-11-02 09:58:09 +01002509 blocksize = sctx->sectorsize;
Stefan Behrensff023aa2012-11-06 11:43:11 +01002510 WARN_ON(1);
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002511 }
Arne Jansena2de7332011-03-08 14:14:00 +01002512
2513 while (len) {
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002514 u64 l = min_t(u64, len, blocksize);
Arne Jansena2de7332011-03-08 14:14:00 +01002515 int have_csum = 0;
2516
2517 if (flags & BTRFS_EXTENT_FLAG_DATA) {
2518 /* push csums to sbio */
Zhao Lei3b5753e2015-08-24 22:03:02 +08002519 have_csum = scrub_find_csum(sctx, logical, csum);
Arne Jansena2de7332011-03-08 14:14:00 +01002520 if (have_csum == 0)
Stefan Behrensd9d181c2012-11-02 09:58:09 +01002521 ++sctx->stat.no_csum;
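			/*
			 * Data without a checksum cannot be verified; in
			 * dev replace mode it is merely copied to the
			 * target device, bypassing the scrub machinery.
			 */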
Stefan Behrensff023aa2012-11-06 11:43:11 +01002522 if (sctx->is_dev_replace && !have_csum) {
2523 ret = copy_nocow_pages(sctx, logical, l,
2524 mirror_num,
2525 physical_for_dev_replace);
2526 goto behind_scrub_pages;
2527 }
Arne Jansena2de7332011-03-08 14:14:00 +01002528 }
Stefan Behrensa36cf8b2012-11-02 13:26:57 +01002529 ret = scrub_pages(sctx, logical, l, physical, dev, flags, gen,
Stefan Behrensff023aa2012-11-06 11:43:11 +01002530 mirror_num, have_csum ? csum : NULL, 0,
2531 physical_for_dev_replace);
2532behind_scrub_pages:
Arne Jansena2de7332011-03-08 14:14:00 +01002533 if (ret)
2534 return ret;
2535 len -= l;
2536 logical += l;
2537 physical += l;
Stefan Behrensff023aa2012-11-06 11:43:11 +01002538 physical_for_dev_replace += l;
Arne Jansena2de7332011-03-08 14:14:00 +01002539 }
2540 return 0;
2541}
2542
Miao Xie5a6ac9e2014-11-06 17:20:58 +08002543static int scrub_pages_for_parity(struct scrub_parity *sparity,
2544 u64 logical, u64 len,
2545 u64 physical, struct btrfs_device *dev,
2546 u64 flags, u64 gen, int mirror_num, u8 *csum)
2547{
2548 struct scrub_ctx *sctx = sparity->sctx;
2549 struct scrub_block *sblock;
2550 int index;
2551
David Sterba58c4e172016-02-11 10:49:42 +01002552 sblock = kzalloc(sizeof(*sblock), GFP_KERNEL);
Miao Xie5a6ac9e2014-11-06 17:20:58 +08002553 if (!sblock) {
2554 spin_lock(&sctx->stat_lock);
2555 sctx->stat.malloc_errors++;
2556 spin_unlock(&sctx->stat_lock);
2557 return -ENOMEM;
2558 }
2559
2560 /* one ref inside this function, plus one for each page added to
2561 * a bio later on */
Zhao Lei57019342015-01-20 15:11:45 +08002562 atomic_set(&sblock->refs, 1);
Miao Xie5a6ac9e2014-11-06 17:20:58 +08002563 sblock->sctx = sctx;
2564 sblock->no_io_error_seen = 1;
2565 sblock->sparity = sparity;
2566 scrub_parity_get(sparity);
2567
2568 for (index = 0; len > 0; index++) {
2569 struct scrub_page *spage;
2570 u64 l = min_t(u64, len, PAGE_SIZE);
2571
David Sterba58c4e172016-02-11 10:49:42 +01002572 spage = kzalloc(sizeof(*spage), GFP_KERNEL);
Miao Xie5a6ac9e2014-11-06 17:20:58 +08002573 if (!spage) {
2574leave_nomem:
2575 spin_lock(&sctx->stat_lock);
2576 sctx->stat.malloc_errors++;
2577 spin_unlock(&sctx->stat_lock);
2578 scrub_block_put(sblock);
2579 return -ENOMEM;
2580 }
2581 BUG_ON(index >= SCRUB_MAX_PAGES_PER_BLOCK);
2582 /* For scrub block */
2583 scrub_page_get(spage);
2584 sblock->pagev[index] = spage;
2585 /* For scrub parity */
2586 scrub_page_get(spage);
2587 list_add_tail(&spage->list, &sparity->spages);
2588 spage->sblock = sblock;
2589 spage->dev = dev;
2590 spage->flags = flags;
2591 spage->generation = gen;
2592 spage->logical = logical;
2593 spage->physical = physical;
2594 spage->mirror_num = mirror_num;
2595 if (csum) {
2596 spage->have_csum = 1;
2597 memcpy(spage->csum, csum, sctx->csum_size);
2598 } else {
2599 spage->have_csum = 0;
2600 }
2601 sblock->page_count++;
David Sterba58c4e172016-02-11 10:49:42 +01002602 spage->page = alloc_page(GFP_KERNEL);
Miao Xie5a6ac9e2014-11-06 17:20:58 +08002603 if (!spage->page)
2604 goto leave_nomem;
2605 len -= l;
2606 logical += l;
2607 physical += l;
2608 }
2609
2610 WARN_ON(sblock->page_count == 0);
2611 for (index = 0; index < sblock->page_count; index++) {
2612 struct scrub_page *spage = sblock->pagev[index];
2613 int ret;
2614
2615 ret = scrub_add_page_to_rd_bio(sctx, spage);
2616 if (ret) {
2617 scrub_block_put(sblock);
2618 return ret;
2619 }
2620 }
2621
2622 /* last one frees, either here or in bio completion for last page */
2623 scrub_block_put(sblock);
2624 return 0;
2625}
2626
2627static int scrub_extent_for_parity(struct scrub_parity *sparity,
2628 u64 logical, u64 len,
2629 u64 physical, struct btrfs_device *dev,
2630 u64 flags, u64 gen, int mirror_num)
2631{
2632 struct scrub_ctx *sctx = sparity->sctx;
2633 int ret;
2634 u8 csum[BTRFS_CSUM_SIZE];
2635 u32 blocksize;
2636
Omar Sandoval4a770892015-06-19 11:52:52 -07002637 if (dev->missing) {
2638 scrub_parity_mark_sectors_error(sparity, logical, len);
2639 return 0;
2640 }
2641
Miao Xie5a6ac9e2014-11-06 17:20:58 +08002642 if (flags & BTRFS_EXTENT_FLAG_DATA) {
2643 blocksize = sctx->sectorsize;
2644 } else if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
2645 blocksize = sctx->nodesize;
2646 } else {
2647 blocksize = sctx->sectorsize;
2648 WARN_ON(1);
2649 }
2650
2651 while (len) {
2652 u64 l = min_t(u64, len, blocksize);
2653 int have_csum = 0;
2654
2655 if (flags & BTRFS_EXTENT_FLAG_DATA) {
2656 /* push csums to sbio */
Zhao Lei3b5753e2015-08-24 22:03:02 +08002657 have_csum = scrub_find_csum(sctx, logical, csum);
Miao Xie5a6ac9e2014-11-06 17:20:58 +08002658 if (have_csum == 0)
2659 goto skip;
2660 }
2661 ret = scrub_pages_for_parity(sparity, logical, l, physical, dev,
2662 flags, gen, mirror_num,
2663 have_csum ? csum : NULL);
Miao Xie5a6ac9e2014-11-06 17:20:58 +08002664 if (ret)
2665 return ret;
Dan Carpenter6b6d24b2014-12-12 22:30:00 +03002666skip:
Miao Xie5a6ac9e2014-11-06 17:20:58 +08002667 len -= l;
2668 logical += l;
2669 physical += l;
2670 }
2671 return 0;
2672}
2673
Wang Shilong3b080b22014-04-01 18:01:43 +08002674/*
2675 * Given a physical address, this will calculate its
2676 * logical offset. If this is a parity stripe, it will return
2677 * the leftmost data stripe's logical offset.
2678 *
2679 * Returns 0 if it is a data stripe, 1 if it is a parity stripe.
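 *
 * Illustrative example: with RAID5 over three devices, parity
 * rotates by one device per full stripe, so in the first full
 * stripe the data lives on devices 0 and 1 and the parity on
 * device 2; a physical address in that range on device 2 makes
 * this return 1, with *stripe_start set to the logical start of
 * the full stripe (its leftmost data stripe).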
2680 */
2681static int get_raid56_logic_offset(u64 physical, int num,
Miao Xie5a6ac9e2014-11-06 17:20:58 +08002682 struct map_lookup *map, u64 *offset,
2683 u64 *stripe_start)
Wang Shilong3b080b22014-04-01 18:01:43 +08002684{
2685 int i;
2686 int j = 0;
2687 u64 stripe_nr;
2688 u64 last_offset;
David Sterba9d644a62015-02-20 18:42:11 +01002689 u32 stripe_index;
2690 u32 rot;
Wang Shilong3b080b22014-04-01 18:01:43 +08002691
2692 last_offset = (physical - map->stripes[num].physical) *
2693 nr_data_stripes(map);
Miao Xie5a6ac9e2014-11-06 17:20:58 +08002694 if (stripe_start)
2695 *stripe_start = last_offset;
2696
Wang Shilong3b080b22014-04-01 18:01:43 +08002697 *offset = last_offset;
2698 for (i = 0; i < nr_data_stripes(map); i++) {
2699 *offset = last_offset + i * map->stripe_len;
2700
David Sterbab8b93ad2015-01-16 17:26:13 +01002701 stripe_nr = div_u64(*offset, map->stripe_len);
2702 stripe_nr = div_u64(stripe_nr, nr_data_stripes(map));
Wang Shilong3b080b22014-04-01 18:01:43 +08002703
2704 /* Work out the disk rotation on this stripe-set */
David Sterba47c57132015-02-20 18:43:47 +01002705 stripe_nr = div_u64_rem(stripe_nr, map->num_stripes, &rot);
Wang Shilong3b080b22014-04-01 18:01:43 +08002706 /* calculate which stripe this data locates */
2707 rot += i;
Wang Shilonge4fbaee2014-04-11 18:32:25 +08002708 stripe_index = rot % map->num_stripes;
Wang Shilong3b080b22014-04-01 18:01:43 +08002709 if (stripe_index == num)
2710 return 0;
2711 if (stripe_index < num)
2712 j++;
2713 }
2714 *offset = last_offset + j * map->stripe_len;
2715 return 1;
2716}
2717
Miao Xie5a6ac9e2014-11-06 17:20:58 +08002718static void scrub_free_parity(struct scrub_parity *sparity)
2719{
2720 struct scrub_ctx *sctx = sparity->sctx;
2721 struct scrub_page *curr, *next;
2722 int nbits;
2723
2724 nbits = bitmap_weight(sparity->ebitmap, sparity->nsectors);
2725 if (nbits) {
2726 spin_lock(&sctx->stat_lock);
2727 sctx->stat.read_errors += nbits;
2728 sctx->stat.uncorrectable_errors += nbits;
2729 spin_unlock(&sctx->stat_lock);
2730 }
2731
2732 list_for_each_entry_safe(curr, next, &sparity->spages, list) {
2733 list_del_init(&curr->list);
2734 scrub_page_put(curr);
2735 }
2736
2737 kfree(sparity);
2738}
2739
Zhao Lei20b2e302015-06-04 20:09:15 +08002740static void scrub_parity_bio_endio_worker(struct btrfs_work *work)
2741{
2742 struct scrub_parity *sparity = container_of(work, struct scrub_parity,
2743 work);
2744 struct scrub_ctx *sctx = sparity->sctx;
2745
2746 scrub_free_parity(sparity);
2747 scrub_pending_bio_dec(sctx);
2748}
2749
Christoph Hellwig4246a0b2015-07-20 15:29:37 +02002750static void scrub_parity_bio_endio(struct bio *bio)
Miao Xie5a6ac9e2014-11-06 17:20:58 +08002751{
2752 struct scrub_parity *sparity = (struct scrub_parity *)bio->bi_private;
Miao Xie5a6ac9e2014-11-06 17:20:58 +08002753
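	/*
	 * An I/O error on the parity rbio means none of the covered
	 * sectors could be repaired, so mark them all as errors.
	 */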
Christoph Hellwig4246a0b2015-07-20 15:29:37 +02002754 if (bio->bi_error)
Miao Xie5a6ac9e2014-11-06 17:20:58 +08002755 bitmap_or(sparity->ebitmap, sparity->ebitmap, sparity->dbitmap,
2756 sparity->nsectors);
2757
Miao Xie5a6ac9e2014-11-06 17:20:58 +08002758 bio_put(bio);
Zhao Lei20b2e302015-06-04 20:09:15 +08002759
2760 btrfs_init_work(&sparity->work, btrfs_scrubparity_helper,
2761 scrub_parity_bio_endio_worker, NULL, NULL);
2762 btrfs_queue_work(sparity->sctx->dev_root->fs_info->scrub_parity_workers,
2763 &sparity->work);
Miao Xie5a6ac9e2014-11-06 17:20:58 +08002764}
2765
2766static void scrub_parity_check_and_repair(struct scrub_parity *sparity)
2767{
2768 struct scrub_ctx *sctx = sparity->sctx;
2769 struct bio *bio;
2770 struct btrfs_raid_bio *rbio;
2771 struct scrub_page *spage;
2772 struct btrfs_bio *bbio = NULL;
Miao Xie5a6ac9e2014-11-06 17:20:58 +08002773 u64 length;
2774 int ret;
2775
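	/*
	 * Drop the sectors that already failed their read from the data
	 * bitmap; those went through the regular repair path.  If nothing
	 * is left, there is no parity to check.
	 */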
2776 if (!bitmap_andnot(sparity->dbitmap, sparity->dbitmap, sparity->ebitmap,
2777 sparity->nsectors))
2778 goto out;
2779
Zhao Leia0dd59d2015-07-21 15:42:26 +08002780 length = sparity->logic_end - sparity->logic_start;
Miao Xie76035972014-11-14 17:45:42 +08002781 ret = btrfs_map_sblock(sctx->dev_root->fs_info, WRITE,
Miao Xie5a6ac9e2014-11-06 17:20:58 +08002782 sparity->logic_start,
Zhao Lei8e5cfb52015-01-20 15:11:33 +08002783 &length, &bbio, 0, 1);
2784 if (ret || !bbio || !bbio->raid_map)
Miao Xie5a6ac9e2014-11-06 17:20:58 +08002785 goto bbio_out;
2786
2787 bio = btrfs_io_bio_alloc(GFP_NOFS, 0);
2788 if (!bio)
2789 goto bbio_out;
2790
2791 bio->bi_iter.bi_sector = sparity->logic_start >> 9;
2792 bio->bi_private = sparity;
2793 bio->bi_end_io = scrub_parity_bio_endio;
2794
2795 rbio = raid56_parity_alloc_scrub_rbio(sctx->dev_root, bio, bbio,
Zhao Lei8e5cfb52015-01-20 15:11:33 +08002796 length, sparity->scrub_dev,
Miao Xie5a6ac9e2014-11-06 17:20:58 +08002797 sparity->dbitmap,
2798 sparity->nsectors);
2799 if (!rbio)
2800 goto rbio_out;
2801
2802 list_for_each_entry(spage, &sparity->spages, list)
Omar Sandovalb4ee1782015-06-19 11:52:50 -07002803 raid56_add_scrub_pages(rbio, spage->page, spage->logical);
Miao Xie5a6ac9e2014-11-06 17:20:58 +08002804
2805 scrub_pending_bio_inc(sctx);
2806 raid56_parity_submit_scrub_rbio(rbio);
2807 return;
2808
2809rbio_out:
2810 bio_put(bio);
2811bbio_out:
Zhao Lei6e9606d2015-01-20 15:11:34 +08002812 btrfs_put_bbio(bbio);
Miao Xie5a6ac9e2014-11-06 17:20:58 +08002813 bitmap_or(sparity->ebitmap, sparity->ebitmap, sparity->dbitmap,
2814 sparity->nsectors);
2815 spin_lock(&sctx->stat_lock);
2816 sctx->stat.malloc_errors++;
2817 spin_unlock(&sctx->stat_lock);
2818out:
2819 scrub_free_parity(sparity);
2820}
2821
2822static inline int scrub_calc_parity_bitmap_len(int nsectors)
2823{
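	/*
	 * Round up to whole longs so the bitmap_*() helpers can operate
	 * on the buffer; e.g. (illustrative) nsectors == 16 on a 64-bit
	 * machine needs one long, i.e. 8 bytes per bitmap.
	 */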
Zhao Leibfca9a62014-12-08 19:55:57 +08002824 return DIV_ROUND_UP(nsectors, BITS_PER_LONG) * sizeof(long);
Miao Xie5a6ac9e2014-11-06 17:20:58 +08002825}
2826
2827static void scrub_parity_get(struct scrub_parity *sparity)
2828{
Zhao Lei57019342015-01-20 15:11:45 +08002829 atomic_inc(&sparity->refs);
Miao Xie5a6ac9e2014-11-06 17:20:58 +08002830}
2831
2832static void scrub_parity_put(struct scrub_parity *sparity)
2833{
Zhao Lei57019342015-01-20 15:11:45 +08002834 if (!atomic_dec_and_test(&sparity->refs))
Miao Xie5a6ac9e2014-11-06 17:20:58 +08002835 return;
2836
2837 scrub_parity_check_and_repair(sparity);
2838}
2839
2840static noinline_for_stack int scrub_raid56_parity(struct scrub_ctx *sctx,
2841 struct map_lookup *map,
2842 struct btrfs_device *sdev,
2843 struct btrfs_path *path,
2844 u64 logic_start,
2845 u64 logic_end)
2846{
2847 struct btrfs_fs_info *fs_info = sctx->dev_root->fs_info;
2848 struct btrfs_root *root = fs_info->extent_root;
2849 struct btrfs_root *csum_root = fs_info->csum_root;
2850 struct btrfs_extent_item *extent;
Omar Sandoval4a770892015-06-19 11:52:52 -07002851 struct btrfs_bio *bbio = NULL;
Miao Xie5a6ac9e2014-11-06 17:20:58 +08002852 u64 flags;
2853 int ret;
2854 int slot;
2855 struct extent_buffer *l;
2856 struct btrfs_key key;
2857 u64 generation;
2858 u64 extent_logical;
2859 u64 extent_physical;
2860 u64 extent_len;
Omar Sandoval4a770892015-06-19 11:52:52 -07002861 u64 mapped_length;
Miao Xie5a6ac9e2014-11-06 17:20:58 +08002862 struct btrfs_device *extent_dev;
2863 struct scrub_parity *sparity;
2864 int nsectors;
2865 int bitmap_len;
2866 int extent_mirror_num;
2867 int stop_loop = 0;
2868
Liu Bo3d8da672016-04-26 17:53:31 -07002869 nsectors = div_u64(map->stripe_len, root->sectorsize);
Miao Xie5a6ac9e2014-11-06 17:20:58 +08002870 bitmap_len = scrub_calc_parity_bitmap_len(nsectors);
2871 sparity = kzalloc(sizeof(struct scrub_parity) + 2 * bitmap_len,
2872 GFP_NOFS);
2873 if (!sparity) {
2874 spin_lock(&sctx->stat_lock);
2875 sctx->stat.malloc_errors++;
2876 spin_unlock(&sctx->stat_lock);
2877 return -ENOMEM;
2878 }
2879
2880 sparity->stripe_len = map->stripe_len;
2881 sparity->nsectors = nsectors;
2882 sparity->sctx = sctx;
2883 sparity->scrub_dev = sdev;
2884 sparity->logic_start = logic_start;
2885 sparity->logic_end = logic_end;
Zhao Lei57019342015-01-20 15:11:45 +08002886 atomic_set(&sparity->refs, 1);
Miao Xie5a6ac9e2014-11-06 17:20:58 +08002887 INIT_LIST_HEAD(&sparity->spages);
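	/*
	 * dbitmap and ebitmap share the single allocation at the tail
	 * of struct scrub_parity, one bitmap_len-sized half each.
	 */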
2888 sparity->dbitmap = sparity->bitmap;
2889 sparity->ebitmap = (void *)sparity->bitmap + bitmap_len;
2890
2891 ret = 0;
2892 while (logic_start < logic_end) {
2893 if (btrfs_fs_incompat(fs_info, SKINNY_METADATA))
2894 key.type = BTRFS_METADATA_ITEM_KEY;
2895 else
2896 key.type = BTRFS_EXTENT_ITEM_KEY;
2897 key.objectid = logic_start;
2898 key.offset = (u64)-1;
2899
2900 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
2901 if (ret < 0)
2902 goto out;
2903
2904 if (ret > 0) {
2905 ret = btrfs_previous_extent_item(root, path, 0);
2906 if (ret < 0)
2907 goto out;
2908 if (ret > 0) {
2909 btrfs_release_path(path);
2910 ret = btrfs_search_slot(NULL, root, &key,
2911 path, 0, 0);
2912 if (ret < 0)
2913 goto out;
2914 }
2915 }
2916
2917 stop_loop = 0;
2918 while (1) {
2919 u64 bytes;
2920
2921 l = path->nodes[0];
2922 slot = path->slots[0];
2923 if (slot >= btrfs_header_nritems(l)) {
2924 ret = btrfs_next_leaf(root, path);
2925 if (ret == 0)
2926 continue;
2927 if (ret < 0)
2928 goto out;
2929
2930 stop_loop = 1;
2931 break;
2932 }
2933 btrfs_item_key_to_cpu(l, &key, slot);
2934
Zhao Leid7cad232015-07-22 13:14:48 +08002935 if (key.type != BTRFS_EXTENT_ITEM_KEY &&
2936 key.type != BTRFS_METADATA_ITEM_KEY)
2937 goto next;
2938
Miao Xie5a6ac9e2014-11-06 17:20:58 +08002939 if (key.type == BTRFS_METADATA_ITEM_KEY)
2940 bytes = root->nodesize;
2941 else
2942 bytes = key.offset;
2943
2944 if (key.objectid + bytes <= logic_start)
2945 goto next;
2946
Zhao Leia0dd59d2015-07-21 15:42:26 +08002947 if (key.objectid >= logic_end) {
Miao Xie5a6ac9e2014-11-06 17:20:58 +08002948 stop_loop = 1;
2949 break;
2950 }
2951
2952 while (key.objectid >= logic_start + map->stripe_len)
2953 logic_start += map->stripe_len;
2954
2955 extent = btrfs_item_ptr(l, slot,
2956 struct btrfs_extent_item);
2957 flags = btrfs_extent_flags(l, extent);
2958 generation = btrfs_extent_generation(l, extent);
2959
Zhao Leia323e812015-07-23 12:29:49 +08002960 if ((flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) &&
2961 (key.objectid < logic_start ||
2962 key.objectid + bytes >
2963 logic_start + map->stripe_len)) {
2964 btrfs_err(fs_info, "scrub: tree block %llu spanning stripes, ignored. logical=%llu",
2965 key.objectid, logic_start);
Zhao Lei9799d2c2015-08-25 21:31:40 +08002966 spin_lock(&sctx->stat_lock);
2967 sctx->stat.uncorrectable_errors++;
2968 spin_unlock(&sctx->stat_lock);
Miao Xie5a6ac9e2014-11-06 17:20:58 +08002969 goto next;
2970 }
2971again:
2972 extent_logical = key.objectid;
2973 extent_len = bytes;
2974
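			/* trim the extent to the stripe currently being scrubbed */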
2975 if (extent_logical < logic_start) {
2976 extent_len -= logic_start - extent_logical;
2977 extent_logical = logic_start;
2978 }
2979
2980 if (extent_logical + extent_len >
2981 logic_start + map->stripe_len)
2982 extent_len = logic_start + map->stripe_len -
2983 extent_logical;
2984
2985 scrub_parity_mark_sectors_data(sparity, extent_logical,
2986 extent_len);
2987
Omar Sandoval4a770892015-06-19 11:52:52 -07002988 mapped_length = extent_len;
Zhao Leif1fee652016-05-17 17:37:38 +08002989 bbio = NULL;
Omar Sandoval4a770892015-06-19 11:52:52 -07002990 ret = btrfs_map_block(fs_info, READ, extent_logical,
2991 &mapped_length, &bbio, 0);
2992 if (!ret) {
2993 if (!bbio || mapped_length < extent_len)
2994 ret = -EIO;
2995 }
2996 if (ret) {
2997 btrfs_put_bbio(bbio);
2998 goto out;
2999 }
3000 extent_physical = bbio->stripes[0].physical;
3001 extent_mirror_num = bbio->mirror_num;
3002 extent_dev = bbio->stripes[0].dev;
3003 btrfs_put_bbio(bbio);
Miao Xie5a6ac9e2014-11-06 17:20:58 +08003004
3005 ret = btrfs_lookup_csums_range(csum_root,
3006 extent_logical,
3007 extent_logical + extent_len - 1,
3008 &sctx->csum_list, 1);
3009 if (ret)
3010 goto out;
3011
3012 ret = scrub_extent_for_parity(sparity, extent_logical,
3013 extent_len,
3014 extent_physical,
3015 extent_dev, flags,
3016 generation,
3017 extent_mirror_num);
Zhao Lei6fa96d72015-07-21 12:22:30 +08003018
3019 scrub_free_csums(sctx);
3020
Miao Xie5a6ac9e2014-11-06 17:20:58 +08003021 if (ret)
3022 goto out;
3023
Miao Xie5a6ac9e2014-11-06 17:20:58 +08003024 if (extent_logical + extent_len <
3025 key.objectid + bytes) {
3026 logic_start += map->stripe_len;
3027
3028 if (logic_start >= logic_end) {
3029 stop_loop = 1;
3030 break;
3031 }
3032
3033 if (logic_start < key.objectid + bytes) {
3034 cond_resched();
3035 goto again;
3036 }
3037 }
3038next:
3039 path->slots[0]++;
3040 }
3041
3042 btrfs_release_path(path);
3043
3044 if (stop_loop)
3045 break;
3046
3047 logic_start += map->stripe_len;
3048 }
3049out:
3050 if (ret < 0)
3051 scrub_parity_mark_sectors_error(sparity, logic_start,
Zhao Leia0dd59d2015-07-21 15:42:26 +08003052 logic_end - logic_start);
Miao Xie5a6ac9e2014-11-06 17:20:58 +08003053 scrub_parity_put(sparity);
3054 scrub_submit(sctx);
3055 mutex_lock(&sctx->wr_ctx.wr_lock);
3056 scrub_wr_submit(sctx);
3057 mutex_unlock(&sctx->wr_ctx.wr_lock);
3058
3059 btrfs_release_path(path);
3060 return ret < 0 ? ret : 0;
3061}
3062
Stefan Behrensd9d181c2012-11-02 09:58:09 +01003063static noinline_for_stack int scrub_stripe(struct scrub_ctx *sctx,
Stefan Behrensa36cf8b2012-11-02 13:26:57 +01003064 struct map_lookup *map,
3065 struct btrfs_device *scrub_dev,
Stefan Behrensff023aa2012-11-06 11:43:11 +01003066 int num, u64 base, u64 length,
3067 int is_dev_replace)
Arne Jansena2de7332011-03-08 14:14:00 +01003068{
Miao Xie5a6ac9e2014-11-06 17:20:58 +08003069 struct btrfs_path *path, *ppath;
Stefan Behrensa36cf8b2012-11-02 13:26:57 +01003070 struct btrfs_fs_info *fs_info = sctx->dev_root->fs_info;
Arne Jansena2de7332011-03-08 14:14:00 +01003071 struct btrfs_root *root = fs_info->extent_root;
3072 struct btrfs_root *csum_root = fs_info->csum_root;
3073 struct btrfs_extent_item *extent;
Arne Jansene7786c32011-05-28 20:58:38 +00003074 struct blk_plug plug;
Arne Jansena2de7332011-03-08 14:14:00 +01003075 u64 flags;
3076 int ret;
3077 int slot;
Arne Jansena2de7332011-03-08 14:14:00 +01003078 u64 nstripes;
Arne Jansena2de7332011-03-08 14:14:00 +01003079 struct extent_buffer *l;
Arne Jansena2de7332011-03-08 14:14:00 +01003080 u64 physical;
3081 u64 logical;
Liu Bo625f1c8d2013-04-27 02:56:57 +00003082 u64 logic_end;
Wang Shilong3b080b22014-04-01 18:01:43 +08003083 u64 physical_end;
Arne Jansena2de7332011-03-08 14:14:00 +01003084 u64 generation;
Jan Schmidte12fa9c2011-06-17 15:55:21 +02003085 int mirror_num;
Arne Jansen7a262852011-06-10 12:39:23 +02003086 struct reada_control *reada1;
3087 struct reada_control *reada2;
David Sterbae6c11f92016-03-24 18:00:53 +01003088 struct btrfs_key key;
Arne Jansen7a262852011-06-10 12:39:23 +02003089 struct btrfs_key key_end;
Arne Jansena2de7332011-03-08 14:14:00 +01003090 u64 increment = map->stripe_len;
3091 u64 offset;
Stefan Behrensff023aa2012-11-06 11:43:11 +01003092 u64 extent_logical;
3093 u64 extent_physical;
3094 u64 extent_len;
Miao Xie5a6ac9e2014-11-06 17:20:58 +08003095 u64 stripe_logical;
3096 u64 stripe_end;
Stefan Behrensff023aa2012-11-06 11:43:11 +01003097 struct btrfs_device *extent_dev;
3098 int extent_mirror_num;
Wang Shilong3b080b22014-04-01 18:01:43 +08003099 int stop_loop = 0;
David Woodhouse53b381b2013-01-29 18:40:14 -05003100
Wang Shilong3b080b22014-04-01 18:01:43 +08003101 physical = map->stripes[num].physical;
Arne Jansena2de7332011-03-08 14:14:00 +01003102 offset = 0;
David Sterbab8b93ad2015-01-16 17:26:13 +01003103 nstripes = div_u64(length, map->stripe_len);
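	/*
	 * Work out, per RAID profile, where this device's first stripe
	 * sits in the logical address space (offset), how far apart two
	 * consecutive stripes on this device are (increment) and which
	 * copy of the data this device holds (mirror_num).
	 */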
Arne Jansena2de7332011-03-08 14:14:00 +01003104 if (map->type & BTRFS_BLOCK_GROUP_RAID0) {
3105 offset = map->stripe_len * num;
3106 increment = map->stripe_len * map->num_stripes;
Jan Schmidt193ea742011-06-13 19:56:54 +02003107 mirror_num = 1;
Arne Jansena2de7332011-03-08 14:14:00 +01003108 } else if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
3109 int factor = map->num_stripes / map->sub_stripes;
3110 offset = map->stripe_len * (num / map->sub_stripes);
3111 increment = map->stripe_len * factor;
Jan Schmidt193ea742011-06-13 19:56:54 +02003112 mirror_num = num % map->sub_stripes + 1;
Arne Jansena2de7332011-03-08 14:14:00 +01003113 } else if (map->type & BTRFS_BLOCK_GROUP_RAID1) {
3114 increment = map->stripe_len;
Jan Schmidt193ea742011-06-13 19:56:54 +02003115 mirror_num = num % map->num_stripes + 1;
Arne Jansena2de7332011-03-08 14:14:00 +01003116 } else if (map->type & BTRFS_BLOCK_GROUP_DUP) {
3117 increment = map->stripe_len;
Jan Schmidt193ea742011-06-13 19:56:54 +02003118 mirror_num = num % map->num_stripes + 1;
Zhao Leiffe2d202015-01-20 15:11:44 +08003119 } else if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
Miao Xie5a6ac9e2014-11-06 17:20:58 +08003120 get_raid56_logic_offset(physical, num, map, &offset, NULL);
Wang Shilong3b080b22014-04-01 18:01:43 +08003121 increment = map->stripe_len * nr_data_stripes(map);
3122 mirror_num = 1;
Arne Jansena2de7332011-03-08 14:14:00 +01003123 } else {
3124 increment = map->stripe_len;
Jan Schmidt193ea742011-06-13 19:56:54 +02003125 mirror_num = 1;
Arne Jansena2de7332011-03-08 14:14:00 +01003126 }
3127
3128 path = btrfs_alloc_path();
3129 if (!path)
3130 return -ENOMEM;
3131
Miao Xie5a6ac9e2014-11-06 17:20:58 +08003132 ppath = btrfs_alloc_path();
3133 if (!ppath) {
Tsutomu Itoh379d6852015-01-09 17:37:52 +09003134 btrfs_free_path(path);
Miao Xie5a6ac9e2014-11-06 17:20:58 +08003135 return -ENOMEM;
3136 }
3137
Stefan Behrensb5d67f62012-03-27 14:21:27 -04003138 /*
3139	 * work on the commit root. The related disk blocks are static
3140	 * as long as COW is applied. This means it is safe to rewrite
3141	 * them to repair disk errors without any race conditions.
3142 */
Arne Jansena2de7332011-03-08 14:14:00 +01003143 path->search_commit_root = 1;
3144 path->skip_locking = 1;
3145
Gui Hecheng063c54d2015-01-09 09:39:40 +08003146 ppath->search_commit_root = 1;
3147 ppath->skip_locking = 1;
Arne Jansena2de7332011-03-08 14:14:00 +01003148 /*
Arne Jansen7a262852011-06-10 12:39:23 +02003149	 * trigger the readahead for the extent tree and the csum tree
3150	 * and wait for completion. During readahead, the scrub is
3151	 * officially paused so it does not hold off transaction commits.
Arne Jansena2de7332011-03-08 14:14:00 +01003152 */
3153 logical = base + offset;
Wang Shilong3b080b22014-04-01 18:01:43 +08003154 physical_end = physical + nstripes * map->stripe_len;
Zhao Leiffe2d202015-01-20 15:11:44 +08003155 if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
Wang Shilong3b080b22014-04-01 18:01:43 +08003156 get_raid56_logic_offset(physical_end, num,
Miao Xie5a6ac9e2014-11-06 17:20:58 +08003157 map, &logic_end, NULL);
Wang Shilong3b080b22014-04-01 18:01:43 +08003158 logic_end += base;
3159 } else {
3160 logic_end = logical + increment * nstripes;
3161 }
Stefan Behrensd9d181c2012-11-02 09:58:09 +01003162 wait_event(sctx->list_wait,
Stefan Behrensb6bfebc2012-11-02 16:44:58 +01003163 atomic_read(&sctx->bios_in_flight) == 0);
Wang Shilongcb7ab022013-12-04 21:16:53 +08003164 scrub_blocked_if_needed(fs_info);
Arne Jansena2de7332011-03-08 14:14:00 +01003165
Arne Jansen7a262852011-06-10 12:39:23 +02003166 /* FIXME it might be better to start readahead at commit root */
David Sterbae6c11f92016-03-24 18:00:53 +01003167 key.objectid = logical;
3168 key.type = BTRFS_EXTENT_ITEM_KEY;
3169 key.offset = (u64)0;
Wang Shilong3b080b22014-04-01 18:01:43 +08003170 key_end.objectid = logic_end;
Josef Bacik3173a182013-03-07 14:22:04 -05003171 key_end.type = BTRFS_METADATA_ITEM_KEY;
3172 key_end.offset = (u64)-1;
David Sterbae6c11f92016-03-24 18:00:53 +01003173 reada1 = btrfs_reada_add(root, &key, &key_end);
Arne Jansena2de7332011-03-08 14:14:00 +01003174
David Sterbae6c11f92016-03-24 18:00:53 +01003175 key.objectid = BTRFS_EXTENT_CSUM_OBJECTID;
3176 key.type = BTRFS_EXTENT_CSUM_KEY;
3177 key.offset = logical;
Arne Jansen7a262852011-06-10 12:39:23 +02003178 key_end.objectid = BTRFS_EXTENT_CSUM_OBJECTID;
3179 key_end.type = BTRFS_EXTENT_CSUM_KEY;
Wang Shilong3b080b22014-04-01 18:01:43 +08003180 key_end.offset = logic_end;
David Sterbae6c11f92016-03-24 18:00:53 +01003181 reada2 = btrfs_reada_add(csum_root, &key, &key_end);
Arne Jansena2de7332011-03-08 14:14:00 +01003182
Arne Jansen7a262852011-06-10 12:39:23 +02003183 if (!IS_ERR(reada1))
3184 btrfs_reada_wait(reada1);
3185 if (!IS_ERR(reada2))
3186 btrfs_reada_wait(reada2);
Arne Jansena2de7332011-03-08 14:14:00 +01003187
Arne Jansena2de7332011-03-08 14:14:00 +01003188
3189 /*
3190 * collect all data csums for the stripe to avoid seeking during
3191	 * the scrub. With crc32 this might currently end up being about 1MB
3192 */
Arne Jansene7786c32011-05-28 20:58:38 +00003193 blk_start_plug(&plug);
Arne Jansena2de7332011-03-08 14:14:00 +01003194
Arne Jansena2de7332011-03-08 14:14:00 +01003195 /*
3196 * now find all extents for each stripe and scrub them
3197 */
Arne Jansena2de7332011-03-08 14:14:00 +01003198 ret = 0;
Wang Shilong3b080b22014-04-01 18:01:43 +08003199 while (physical < physical_end) {
Arne Jansena2de7332011-03-08 14:14:00 +01003200 /*
3201 * canceled?
3202 */
3203 if (atomic_read(&fs_info->scrub_cancel_req) ||
Stefan Behrensd9d181c2012-11-02 09:58:09 +01003204 atomic_read(&sctx->cancel_req)) {
Arne Jansena2de7332011-03-08 14:14:00 +01003205 ret = -ECANCELED;
3206 goto out;
3207 }
3208 /*
3209 * check to see if we have to pause
3210 */
3211 if (atomic_read(&fs_info->scrub_pause_req)) {
3212 /* push queued extents */
Stefan Behrensff023aa2012-11-06 11:43:11 +01003213 atomic_set(&sctx->wr_ctx.flush_all_writes, 1);
Stefan Behrensd9d181c2012-11-02 09:58:09 +01003214 scrub_submit(sctx);
Stefan Behrensff023aa2012-11-06 11:43:11 +01003215 mutex_lock(&sctx->wr_ctx.wr_lock);
3216 scrub_wr_submit(sctx);
3217 mutex_unlock(&sctx->wr_ctx.wr_lock);
Stefan Behrensd9d181c2012-11-02 09:58:09 +01003218 wait_event(sctx->list_wait,
Stefan Behrensb6bfebc2012-11-02 16:44:58 +01003219 atomic_read(&sctx->bios_in_flight) == 0);
Stefan Behrensff023aa2012-11-06 11:43:11 +01003220 atomic_set(&sctx->wr_ctx.flush_all_writes, 0);
Wang Shilong3cb09292013-12-04 21:15:19 +08003221 scrub_blocked_if_needed(fs_info);
Arne Jansena2de7332011-03-08 14:14:00 +01003222 }
3223
Zhao Leif2f66a22015-07-21 12:22:29 +08003224 if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
3225 ret = get_raid56_logic_offset(physical, num, map,
3226 &logical,
3227 &stripe_logical);
3228 logical += base;
3229 if (ret) {
Zhao Lei79553232015-08-18 17:54:30 +08003230				/* it is a parity stripe */
Zhao Leif2f66a22015-07-21 12:22:29 +08003231 stripe_logical += base;
Zhao Leia0dd59d2015-07-21 15:42:26 +08003232 stripe_end = stripe_logical + increment;
Zhao Leif2f66a22015-07-21 12:22:29 +08003233 ret = scrub_raid56_parity(sctx, map, scrub_dev,
3234 ppath, stripe_logical,
3235 stripe_end);
3236 if (ret)
3237 goto out;
3238 goto skip;
3239 }
3240 }
3241
Wang Shilong7c76edb2014-01-12 21:38:32 +08003242 if (btrfs_fs_incompat(fs_info, SKINNY_METADATA))
3243 key.type = BTRFS_METADATA_ITEM_KEY;
3244 else
3245 key.type = BTRFS_EXTENT_ITEM_KEY;
Arne Jansena2de7332011-03-08 14:14:00 +01003246 key.objectid = logical;
Liu Bo625f1c8d2013-04-27 02:56:57 +00003247 key.offset = (u64)-1;
Arne Jansena2de7332011-03-08 14:14:00 +01003248
3249 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
3250 if (ret < 0)
3251 goto out;
Josef Bacik3173a182013-03-07 14:22:04 -05003252
Arne Jansen8c510322011-06-03 10:09:26 +02003253 if (ret > 0) {
Wang Shilongade2e0b2014-01-12 21:38:33 +08003254 ret = btrfs_previous_extent_item(root, path, 0);
Arne Jansena2de7332011-03-08 14:14:00 +01003255 if (ret < 0)
3256 goto out;
Arne Jansen8c510322011-06-03 10:09:26 +02003257 if (ret > 0) {
3258 /* there's no smaller item, so stick with the
3259 * larger one */
3260 btrfs_release_path(path);
3261 ret = btrfs_search_slot(NULL, root, &key,
3262 path, 0, 0);
3263 if (ret < 0)
3264 goto out;
3265 }
Arne Jansena2de7332011-03-08 14:14:00 +01003266 }
3267
Liu Bo625f1c8d2013-04-27 02:56:57 +00003268 stop_loop = 0;
Arne Jansena2de7332011-03-08 14:14:00 +01003269 while (1) {
Josef Bacik3173a182013-03-07 14:22:04 -05003270 u64 bytes;
3271
Arne Jansena2de7332011-03-08 14:14:00 +01003272 l = path->nodes[0];
3273 slot = path->slots[0];
3274 if (slot >= btrfs_header_nritems(l)) {
3275 ret = btrfs_next_leaf(root, path);
3276 if (ret == 0)
3277 continue;
3278 if (ret < 0)
3279 goto out;
3280
Liu Bo625f1c8d2013-04-27 02:56:57 +00003281 stop_loop = 1;
Arne Jansena2de7332011-03-08 14:14:00 +01003282 break;
3283 }
3284 btrfs_item_key_to_cpu(l, &key, slot);
3285
Zhao Leid7cad232015-07-22 13:14:48 +08003286 if (key.type != BTRFS_EXTENT_ITEM_KEY &&
3287 key.type != BTRFS_METADATA_ITEM_KEY)
3288 goto next;
3289
Josef Bacik3173a182013-03-07 14:22:04 -05003290 if (key.type == BTRFS_METADATA_ITEM_KEY)
David Sterba707e8a02014-06-04 19:22:26 +02003291 bytes = root->nodesize;
Josef Bacik3173a182013-03-07 14:22:04 -05003292 else
3293 bytes = key.offset;
3294
3295 if (key.objectid + bytes <= logical)
Arne Jansena2de7332011-03-08 14:14:00 +01003296 goto next;
3297
Liu Bo625f1c8d2013-04-27 02:56:57 +00003298 if (key.objectid >= logical + map->stripe_len) {
3299 /* out of this device extent */
3300 if (key.objectid >= logic_end)
3301 stop_loop = 1;
3302 break;
3303 }
Arne Jansena2de7332011-03-08 14:14:00 +01003304
3305 extent = btrfs_item_ptr(l, slot,
3306 struct btrfs_extent_item);
3307 flags = btrfs_extent_flags(l, extent);
3308 generation = btrfs_extent_generation(l, extent);
3309
Zhao Leia323e812015-07-23 12:29:49 +08003310 if ((flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) &&
3311 (key.objectid < logical ||
3312 key.objectid + bytes >
3313 logical + map->stripe_len)) {
Frank Holtonefe120a2013-12-20 11:37:06 -05003314 btrfs_err(fs_info,
3315 "scrub: tree block %llu spanning "
3316 "stripes, ignored. logical=%llu",
Geert Uytterhoevenc1c9ff72013-08-20 13:20:07 +02003317 key.objectid, logical);
Zhao Lei9799d2c2015-08-25 21:31:40 +08003318 spin_lock(&sctx->stat_lock);
3319 sctx->stat.uncorrectable_errors++;
3320 spin_unlock(&sctx->stat_lock);
Arne Jansena2de7332011-03-08 14:14:00 +01003321 goto next;
3322 }
3323
Liu Bo625f1c8d2013-04-27 02:56:57 +00003324again:
3325 extent_logical = key.objectid;
3326 extent_len = bytes;
3327
Arne Jansena2de7332011-03-08 14:14:00 +01003328 /*
3329 * trim extent to this stripe
3330 */
Liu Bo625f1c8d2013-04-27 02:56:57 +00003331 if (extent_logical < logical) {
3332 extent_len -= logical - extent_logical;
3333 extent_logical = logical;
Arne Jansena2de7332011-03-08 14:14:00 +01003334 }
Liu Bo625f1c8d2013-04-27 02:56:57 +00003335 if (extent_logical + extent_len >
Arne Jansena2de7332011-03-08 14:14:00 +01003336 logical + map->stripe_len) {
Liu Bo625f1c8d2013-04-27 02:56:57 +00003337 extent_len = logical + map->stripe_len -
3338 extent_logical;
Arne Jansena2de7332011-03-08 14:14:00 +01003339 }
3340
Liu Bo625f1c8d2013-04-27 02:56:57 +00003341 extent_physical = extent_logical - logical + physical;
Stefan Behrensff023aa2012-11-06 11:43:11 +01003342 extent_dev = scrub_dev;
3343 extent_mirror_num = mirror_num;
3344 if (is_dev_replace)
3345 scrub_remap_extent(fs_info, extent_logical,
3346 extent_len, &extent_physical,
3347 &extent_dev,
3348 &extent_mirror_num);
Liu Bo625f1c8d2013-04-27 02:56:57 +00003349
Zhao Leife8cf652015-07-22 13:14:47 +08003350 ret = btrfs_lookup_csums_range(csum_root,
3351 extent_logical,
3352 extent_logical +
3353 extent_len - 1,
3354 &sctx->csum_list, 1);
Arne Jansena2de7332011-03-08 14:14:00 +01003355 if (ret)
3356 goto out;
3357
Liu Bo625f1c8d2013-04-27 02:56:57 +00003358 ret = scrub_extent(sctx, extent_logical, extent_len,
3359 extent_physical, extent_dev, flags,
3360 generation, extent_mirror_num,
Stefan Behrens115930c2013-07-04 16:14:23 +02003361 extent_logical - logical + physical);
Zhao Lei6fa96d72015-07-21 12:22:30 +08003362
3363 scrub_free_csums(sctx);
3364
Liu Bo625f1c8d2013-04-27 02:56:57 +00003365 if (ret)
3366 goto out;
3367
3368 if (extent_logical + extent_len <
3369 key.objectid + bytes) {
Zhao Leiffe2d202015-01-20 15:11:44 +08003370 if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
Wang Shilong3b080b22014-04-01 18:01:43 +08003371 /*
3372 * loop until we find next data stripe
3373 * or we have finished all stripes.
3374 */
Miao Xie5a6ac9e2014-11-06 17:20:58 +08003375loop:
3376 physical += map->stripe_len;
3377 ret = get_raid56_logic_offset(physical,
3378 num, map, &logical,
3379 &stripe_logical);
3380 logical += base;
3381
3382 if (ret && physical < physical_end) {
3383 stripe_logical += base;
3384 stripe_end = stripe_logical +
Zhao Leia0dd59d2015-07-21 15:42:26 +08003385 increment;
Miao Xie5a6ac9e2014-11-06 17:20:58 +08003386 ret = scrub_raid56_parity(sctx,
3387 map, scrub_dev, ppath,
3388 stripe_logical,
3389 stripe_end);
3390 if (ret)
3391 goto out;
3392 goto loop;
3393 }
Wang Shilong3b080b22014-04-01 18:01:43 +08003394 } else {
3395 physical += map->stripe_len;
3396 logical += increment;
3397 }
Liu Bo625f1c8d2013-04-27 02:56:57 +00003398 if (logical < key.objectid + bytes) {
3399 cond_resched();
3400 goto again;
3401 }
3402
Wang Shilong3b080b22014-04-01 18:01:43 +08003403 if (physical >= physical_end) {
Liu Bo625f1c8d2013-04-27 02:56:57 +00003404 stop_loop = 1;
3405 break;
3406 }
3407 }
Arne Jansena2de7332011-03-08 14:14:00 +01003408next:
3409 path->slots[0]++;
3410 }
Chris Mason71267332011-05-23 06:30:52 -04003411 btrfs_release_path(path);
Wang Shilong3b080b22014-04-01 18:01:43 +08003412skip:
Arne Jansena2de7332011-03-08 14:14:00 +01003413 logical += increment;
3414 physical += map->stripe_len;
Stefan Behrensd9d181c2012-11-02 09:58:09 +01003415 spin_lock(&sctx->stat_lock);
Liu Bo625f1c8d2013-04-27 02:56:57 +00003416 if (stop_loop)
3417 sctx->stat.last_physical = map->stripes[num].physical +
3418 length;
3419 else
3420 sctx->stat.last_physical = physical;
Stefan Behrensd9d181c2012-11-02 09:58:09 +01003421 spin_unlock(&sctx->stat_lock);
Liu Bo625f1c8d2013-04-27 02:56:57 +00003422 if (stop_loop)
3423 break;
Arne Jansena2de7332011-03-08 14:14:00 +01003424 }
Stefan Behrensff023aa2012-11-06 11:43:11 +01003425out:
Arne Jansena2de7332011-03-08 14:14:00 +01003426 /* push queued extents */
Stefan Behrensd9d181c2012-11-02 09:58:09 +01003427 scrub_submit(sctx);
Stefan Behrensff023aa2012-11-06 11:43:11 +01003428 mutex_lock(&sctx->wr_ctx.wr_lock);
3429 scrub_wr_submit(sctx);
3430 mutex_unlock(&sctx->wr_ctx.wr_lock);
Arne Jansena2de7332011-03-08 14:14:00 +01003431
Arne Jansene7786c32011-05-28 20:58:38 +00003432 blk_finish_plug(&plug);
Arne Jansena2de7332011-03-08 14:14:00 +01003433 btrfs_free_path(path);
Miao Xie5a6ac9e2014-11-06 17:20:58 +08003434 btrfs_free_path(ppath);
Arne Jansena2de7332011-03-08 14:14:00 +01003435 return ret < 0 ? ret : 0;
3436}
3437
Stefan Behrensd9d181c2012-11-02 09:58:09 +01003438static noinline_for_stack int scrub_chunk(struct scrub_ctx *sctx,
Stefan Behrensa36cf8b2012-11-02 13:26:57 +01003439 struct btrfs_device *scrub_dev,
Stefan Behrensa36cf8b2012-11-02 13:26:57 +01003440 u64 chunk_offset, u64 length,
Filipe Manana020d5b72015-11-19 10:57:20 +00003441 u64 dev_offset,
3442 struct btrfs_block_group_cache *cache,
3443 int is_dev_replace)
Arne Jansena2de7332011-03-08 14:14:00 +01003444{
3445 struct btrfs_mapping_tree *map_tree =
Stefan Behrensa36cf8b2012-11-02 13:26:57 +01003446 &sctx->dev_root->fs_info->mapping_tree;
Arne Jansena2de7332011-03-08 14:14:00 +01003447 struct map_lookup *map;
3448 struct extent_map *em;
3449 int i;
Stefan Behrensff023aa2012-11-06 11:43:11 +01003450 int ret = 0;
Arne Jansena2de7332011-03-08 14:14:00 +01003451
3452 read_lock(&map_tree->map_tree.lock);
3453 em = lookup_extent_mapping(&map_tree->map_tree, chunk_offset, 1);
3454 read_unlock(&map_tree->map_tree.lock);
3455
Filipe Manana020d5b72015-11-19 10:57:20 +00003456 if (!em) {
3457 /*
3458 * Might have been an unused block group deleted by the cleaner
3459 * kthread or relocation.
3460 */
3461 spin_lock(&cache->lock);
3462 if (!cache->removed)
3463 ret = -EINVAL;
3464 spin_unlock(&cache->lock);
3465
3466 return ret;
3467 }
Arne Jansena2de7332011-03-08 14:14:00 +01003468
Jeff Mahoney95617d62015-06-03 10:55:48 -04003469 map = em->map_lookup;
Arne Jansena2de7332011-03-08 14:14:00 +01003470 if (em->start != chunk_offset)
3471 goto out;
3472
3473 if (em->len < length)
3474 goto out;
3475
3476 for (i = 0; i < map->num_stripes; ++i) {
Stefan Behrensa36cf8b2012-11-02 13:26:57 +01003477 if (map->stripes[i].dev->bdev == scrub_dev->bdev &&
Arne Jansen859acaf2012-02-09 15:09:02 +01003478 map->stripes[i].physical == dev_offset) {
Stefan Behrensa36cf8b2012-11-02 13:26:57 +01003479 ret = scrub_stripe(sctx, map, scrub_dev, i,
Stefan Behrensff023aa2012-11-06 11:43:11 +01003480 chunk_offset, length,
3481 is_dev_replace);
Arne Jansena2de7332011-03-08 14:14:00 +01003482 if (ret)
3483 goto out;
3484 }
3485 }
3486out:
3487 free_extent_map(em);
3488
3489 return ret;
3490}
3491
3492static noinline_for_stack
Stefan Behrensa36cf8b2012-11-02 13:26:57 +01003493int scrub_enumerate_chunks(struct scrub_ctx *sctx,
Stefan Behrensff023aa2012-11-06 11:43:11 +01003494 struct btrfs_device *scrub_dev, u64 start, u64 end,
3495 int is_dev_replace)
Arne Jansena2de7332011-03-08 14:14:00 +01003496{
3497 struct btrfs_dev_extent *dev_extent = NULL;
3498 struct btrfs_path *path;
Stefan Behrensa36cf8b2012-11-02 13:26:57 +01003499 struct btrfs_root *root = sctx->dev_root;
Arne Jansena2de7332011-03-08 14:14:00 +01003500 struct btrfs_fs_info *fs_info = root->fs_info;
3501 u64 length;
Arne Jansena2de7332011-03-08 14:14:00 +01003502 u64 chunk_offset;
Zhaolei55e3a602015-08-05 16:43:30 +08003503 int ret = 0;
Zhaolei76a8efa2015-11-17 18:46:17 +08003504 int ro_set;
Arne Jansena2de7332011-03-08 14:14:00 +01003505 int slot;
3506 struct extent_buffer *l;
3507 struct btrfs_key key;
3508 struct btrfs_key found_key;
3509 struct btrfs_block_group_cache *cache;
Stefan Behrensff023aa2012-11-06 11:43:11 +01003510 struct btrfs_dev_replace *dev_replace = &fs_info->dev_replace;
Arne Jansena2de7332011-03-08 14:14:00 +01003511
3512 path = btrfs_alloc_path();
3513 if (!path)
3514 return -ENOMEM;
3515
David Sterbae4058b52015-11-27 16:31:35 +01003516 path->reada = READA_FORWARD;
Arne Jansena2de7332011-03-08 14:14:00 +01003517 path->search_commit_root = 1;
3518 path->skip_locking = 1;
3519
Stefan Behrensa36cf8b2012-11-02 13:26:57 +01003520 key.objectid = scrub_dev->devid;
Arne Jansena2de7332011-03-08 14:14:00 +01003521 key.offset = 0ull;
3522 key.type = BTRFS_DEV_EXTENT_KEY;
3523
Arne Jansena2de7332011-03-08 14:14:00 +01003524 while (1) {
3525 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
3526 if (ret < 0)
Arne Jansen8c510322011-06-03 10:09:26 +02003527 break;
3528 if (ret > 0) {
3529 if (path->slots[0] >=
3530 btrfs_header_nritems(path->nodes[0])) {
3531 ret = btrfs_next_leaf(root, path);
Zhaolei55e3a602015-08-05 16:43:30 +08003532 if (ret < 0)
Arne Jansen8c510322011-06-03 10:09:26 +02003533 break;
Zhaolei55e3a602015-08-05 16:43:30 +08003534 if (ret > 0) {
3535 ret = 0;
3536 break;
3537 }
3538 } else {
3539 ret = 0;
Arne Jansen8c510322011-06-03 10:09:26 +02003540 }
3541 }
Arne Jansena2de7332011-03-08 14:14:00 +01003542
3543 l = path->nodes[0];
3544 slot = path->slots[0];
3545
3546 btrfs_item_key_to_cpu(l, &found_key, slot);
3547
Stefan Behrensa36cf8b2012-11-02 13:26:57 +01003548 if (found_key.objectid != scrub_dev->devid)
Arne Jansena2de7332011-03-08 14:14:00 +01003549 break;
3550
David Sterba962a2982014-06-04 18:41:45 +02003551 if (found_key.type != BTRFS_DEV_EXTENT_KEY)
Arne Jansena2de7332011-03-08 14:14:00 +01003552 break;
3553
3554 if (found_key.offset >= end)
3555 break;
3556
3557 if (found_key.offset < key.offset)
3558 break;
3559
3560 dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
3561 length = btrfs_dev_extent_length(l, dev_extent);
3562
Qu Wenruoced96ed2014-06-19 10:42:51 +08003563 if (found_key.offset + length <= start)
3564 goto skip;
Arne Jansena2de7332011-03-08 14:14:00 +01003565
Arne Jansena2de7332011-03-08 14:14:00 +01003566 chunk_offset = btrfs_dev_extent_chunk_offset(l, dev_extent);
3567
3568 /*
3569 * get a reference on the corresponding block group to prevent
3570 * the chunk from going away while we scrub it
3571 */
3572 cache = btrfs_lookup_block_group(fs_info, chunk_offset);
Qu Wenruoced96ed2014-06-19 10:42:51 +08003573
3574		/* some chunks are removed but not yet committed to disk;
3575		 * continue scrubbing */
3576 if (!cache)
3577 goto skip;
3578
Zhaolei55e3a602015-08-05 16:43:30 +08003579 /*
3580 * we need call btrfs_inc_block_group_ro() with scrubs_paused,
3581 * to avoid deadlock caused by:
3582 * btrfs_inc_block_group_ro()
3583 * -> btrfs_wait_for_commit()
3584 * -> btrfs_commit_transaction()
3585 * -> btrfs_scrub_pause()
3586 */
3587 scrub_pause_on(fs_info);
3588 ret = btrfs_inc_block_group_ro(root, cache);
Filipe Mananaf0e9b7d2016-05-14 09:12:53 +01003589 if (!ret && is_dev_replace) {
3590 /*
3591 * If we are doing a device replace wait for any tasks
3592			 * that started delalloc right before we set the block
3593 * group to RO mode, as they might have just allocated
3594 * an extent from it or decided they could do a nocow
3595 * write. And if any such tasks did that, wait for their
3596 * ordered extents to complete and then commit the
3597 * current transaction, so that we can later see the new
3598 * extent items in the extent tree - the ordered extents
3599 * create delayed data references (for cow writes) when
3600 * they complete, which will be run and insert the
3601 * corresponding extent items into the extent tree when
3602 * we commit the transaction they used when running
3603 * inode.c:btrfs_finish_ordered_io(). We later use
3604 * the commit root of the extent tree to find extents
3605 * to copy from the srcdev into the tgtdev, and we don't
3606 * want to miss any new extents.
3607 */
3608 btrfs_wait_block_group_reservations(cache);
3609 btrfs_wait_nocow_writers(cache);
3610 ret = btrfs_wait_ordered_roots(fs_info, -1,
3611 cache->key.objectid,
3612 cache->key.offset);
3613 if (ret > 0) {
3614 struct btrfs_trans_handle *trans;
3615
3616 trans = btrfs_join_transaction(root);
3617 if (IS_ERR(trans))
3618 ret = PTR_ERR(trans);
3619 else
3620 ret = btrfs_commit_transaction(trans,
3621 root);
3622 if (ret) {
3623 scrub_pause_off(fs_info);
3624 btrfs_put_block_group(cache);
3625 break;
3626 }
3627 }
3628 }
Zhaolei55e3a602015-08-05 16:43:30 +08003629 scrub_pause_off(fs_info);
Zhaolei76a8efa2015-11-17 18:46:17 +08003630
3631 if (ret == 0) {
3632 ro_set = 1;
3633 } else if (ret == -ENOSPC) {
3634 /*
3635 * btrfs_inc_block_group_ro return -ENOSPC when it
3636 * failed in creating new chunk for metadata.
3637 * It is not a problem for scrub/replace, because
3638 * metadata are always cowed, and our scrub paused
3639 * commit_transactions.
3640 */
3641 ro_set = 0;
3642 } else {
3643 btrfs_warn(fs_info, "failed setting block group ro, ret=%d\n",
3644 ret);
Zhaolei55e3a602015-08-05 16:43:30 +08003645 btrfs_put_block_group(cache);
3646 break;
3647 }
3648
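		/*
		 * Publish the range this pass works on; presumably this is
		 * what lets an interrupted dev replace resume at the right
		 * cursor position.
		 */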
Filipe Manana81e87a72016-05-14 16:32:35 +01003649 btrfs_dev_replace_lock(&fs_info->dev_replace, 1);
Stefan Behrensff023aa2012-11-06 11:43:11 +01003650 dev_replace->cursor_right = found_key.offset + length;
3651 dev_replace->cursor_left = found_key.offset;
3652 dev_replace->item_needs_writeback = 1;
Filipe Manana81e87a72016-05-14 16:32:35 +01003653 btrfs_dev_replace_unlock(&fs_info->dev_replace, 1);
Zhao Lei8c204c92015-08-19 15:02:40 +08003654 ret = scrub_chunk(sctx, scrub_dev, chunk_offset, length,
Filipe Manana020d5b72015-11-19 10:57:20 +00003655 found_key.offset, cache, is_dev_replace);
Stefan Behrensff023aa2012-11-06 11:43:11 +01003656
3657 /*
3658 * flush, submit all pending read and write bios, afterwards
3659 * wait for them.
3660 * Note that in the dev replace case, a read request causes
3661 * write requests that are submitted in the read completion
3662 * worker. Therefore in the current situation, it is required
3663 * that all write requests are flushed, so that all read and
3664 * write requests are really completed when bios_in_flight
3665 * changes to 0.
3666 */
3667 atomic_set(&sctx->wr_ctx.flush_all_writes, 1);
3668 scrub_submit(sctx);
3669 mutex_lock(&sctx->wr_ctx.wr_lock);
3670 scrub_wr_submit(sctx);
3671 mutex_unlock(&sctx->wr_ctx.wr_lock);
3672
3673 wait_event(sctx->list_wait,
3674 atomic_read(&sctx->bios_in_flight) == 0);
Zhaoleib708ce92015-08-05 16:43:29 +08003675
3676 scrub_pause_on(fs_info);
Wang Shilong12cf9372014-02-19 19:24:17 +08003677
3678 /*
3679		 * must be called before we decrease @scrub_paused.
3680		 * make sure we don't block transaction commit while
3681		 * we wait for pending workers to finish.
3682 */
Stefan Behrensff023aa2012-11-06 11:43:11 +01003683 wait_event(sctx->list_wait,
3684 atomic_read(&sctx->workers_pending) == 0);
Wang Shilong12cf9372014-02-19 19:24:17 +08003685 atomic_set(&sctx->wr_ctx.flush_all_writes, 0);
3686
Zhaoleib708ce92015-08-05 16:43:29 +08003687 scrub_pause_off(fs_info);
Stefan Behrensff023aa2012-11-06 11:43:11 +01003688
Filipe Manana1a1a8b72016-05-14 19:44:40 +01003689 btrfs_dev_replace_lock(&fs_info->dev_replace, 1);
3690 dev_replace->cursor_left = dev_replace->cursor_right;
3691 dev_replace->item_needs_writeback = 1;
3692 btrfs_dev_replace_unlock(&fs_info->dev_replace, 1);
3693
Zhaolei76a8efa2015-11-17 18:46:17 +08003694 if (ro_set)
3695 btrfs_dec_block_group_ro(root, cache);
Stefan Behrensff023aa2012-11-06 11:43:11 +01003696
Filipe Manana758f2df2015-11-19 11:45:48 +00003697 /*
3698 * We might have prevented the cleaner kthread from deleting
3699 * this block group if it was already unused because we raced
3700 * and set it to RO mode first. So add it back to the unused
3701 * list, otherwise it might not ever be deleted unless a manual
3702 * balance is triggered or it becomes used and unused again.
3703 */
3704 spin_lock(&cache->lock);
3705 if (!cache->removed && !cache->ro && cache->reserved == 0 &&
3706 btrfs_block_group_used(&cache->item) == 0) {
3707 spin_unlock(&cache->lock);
3708 spin_lock(&fs_info->unused_bgs_lock);
3709 if (list_empty(&cache->bg_list)) {
3710 btrfs_get_block_group(cache);
3711 list_add_tail(&cache->bg_list,
3712 &fs_info->unused_bgs);
3713 }
3714 spin_unlock(&fs_info->unused_bgs_lock);
3715 } else {
3716 spin_unlock(&cache->lock);
3717 }
3718
Arne Jansena2de7332011-03-08 14:14:00 +01003719 btrfs_put_block_group(cache);
3720 if (ret)
3721 break;
Stefan Behrensaf1be4f2012-11-27 17:39:51 +00003722 if (is_dev_replace &&
3723 atomic64_read(&dev_replace->num_write_errors) > 0) {
Stefan Behrensff023aa2012-11-06 11:43:11 +01003724 ret = -EIO;
3725 break;
3726 }
3727 if (sctx->stat.malloc_errors > 0) {
3728 ret = -ENOMEM;
3729 break;
3730 }
Qu Wenruoced96ed2014-06-19 10:42:51 +08003731skip:
Arne Jansena2de7332011-03-08 14:14:00 +01003732 key.offset = found_key.offset + length;
Chris Mason71267332011-05-23 06:30:52 -04003733 btrfs_release_path(path);
Arne Jansena2de7332011-03-08 14:14:00 +01003734 }
3735
Arne Jansena2de7332011-03-08 14:14:00 +01003736 btrfs_free_path(path);
Arne Jansen8c510322011-06-03 10:09:26 +02003737
Zhaolei55e3a602015-08-05 16:43:30 +08003738 return ret;
Arne Jansena2de7332011-03-08 14:14:00 +01003739}
3740
Stefan Behrensa36cf8b2012-11-02 13:26:57 +01003741static noinline_for_stack int scrub_supers(struct scrub_ctx *sctx,
3742 struct btrfs_device *scrub_dev)
Arne Jansena2de7332011-03-08 14:14:00 +01003743{
3744 int i;
3745 u64 bytenr;
3746 u64 gen;
3747 int ret;
Stefan Behrensa36cf8b2012-11-02 13:26:57 +01003748 struct btrfs_root *root = sctx->dev_root;
Arne Jansena2de7332011-03-08 14:14:00 +01003749
Miao Xie87533c42013-01-29 10:14:48 +00003750 if (test_bit(BTRFS_FS_STATE_ERROR, &root->fs_info->fs_state))
Jeff Mahoney79787ea2012-03-12 16:03:00 +01003751 return -EIO;
3752
Miao Xie5f546062014-07-24 11:37:09 +08003753	/* Seed devices of a new filesystem have their own generation. */
3754 if (scrub_dev->fs_devices != root->fs_info->fs_devices)
3755 gen = scrub_dev->generation;
3756 else
3757 gen = root->fs_info->last_trans_committed;
Arne Jansena2de7332011-03-08 14:14:00 +01003758
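	/*
	 * Scrub each superblock mirror that still fits on the device;
	 * btrfs_sb_offset() places them at 64K, 64M and 256G.
	 */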
3759 for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) {
3760 bytenr = btrfs_sb_offset(i);
Miao Xie935e5cc2014-09-03 21:35:33 +08003761 if (bytenr + BTRFS_SUPER_INFO_SIZE >
3762 scrub_dev->commit_total_bytes)
Arne Jansena2de7332011-03-08 14:14:00 +01003763 break;
3764
Stefan Behrensd9d181c2012-11-02 09:58:09 +01003765 ret = scrub_pages(sctx, bytenr, BTRFS_SUPER_INFO_SIZE, bytenr,
Stefan Behrensa36cf8b2012-11-02 13:26:57 +01003766 scrub_dev, BTRFS_EXTENT_FLAG_SUPER, gen, i,
Stefan Behrensff023aa2012-11-06 11:43:11 +01003767 NULL, 1, bytenr);
Arne Jansena2de7332011-03-08 14:14:00 +01003768 if (ret)
3769 return ret;
3770 }
Stefan Behrensb6bfebc2012-11-02 16:44:58 +01003771 wait_event(sctx->list_wait, atomic_read(&sctx->bios_in_flight) == 0);
Arne Jansena2de7332011-03-08 14:14:00 +01003772
3773 return 0;
3774}
3775
3776/*
3777 * get a reference count on fs_info->scrub_workers. start worker if necessary
3778 */
Stefan Behrensff023aa2012-11-06 11:43:11 +01003779static noinline_for_stack int scrub_workers_get(struct btrfs_fs_info *fs_info,
3780 int is_dev_replace)
Arne Jansena2de7332011-03-08 14:14:00 +01003781{
David Sterba6f011052015-02-16 18:34:01 +01003782 unsigned int flags = WQ_FREEZABLE | WQ_UNBOUND;
Qu Wenruo0339ef22014-02-28 10:46:17 +08003783 int max_active = fs_info->thread_pool_size;
Arne Jansena2de7332011-03-08 14:14:00 +01003784
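	/*
	 * The workqueues are shared by all running scrubs and torn down
	 * again when the last one finishes; the caller holds
	 * fs_info->scrub_lock, which serializes this refcount.
	 */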
Arne Jansen632dd772011-06-10 12:07:07 +02003785 if (fs_info->scrub_workers_refcnt == 0) {
Stefan Behrensff023aa2012-11-06 11:43:11 +01003786 if (is_dev_replace)
Qu Wenruo0339ef22014-02-28 10:46:17 +08003787 fs_info->scrub_workers =
Jeff Mahoneycb001092016-06-09 16:22:11 -04003788 btrfs_alloc_workqueue(fs_info, "scrub", flags,
Qu Wenruo0339ef22014-02-28 10:46:17 +08003789 1, 4);
Stefan Behrensff023aa2012-11-06 11:43:11 +01003790 else
Qu Wenruo0339ef22014-02-28 10:46:17 +08003791 fs_info->scrub_workers =
Jeff Mahoneycb001092016-06-09 16:22:11 -04003792 btrfs_alloc_workqueue(fs_info, "scrub", flags,
Qu Wenruo0339ef22014-02-28 10:46:17 +08003793 max_active, 4);
Zhao Leie82afc52015-06-12 20:36:58 +08003794 if (!fs_info->scrub_workers)
3795 goto fail_scrub_workers;
3796
Qu Wenruo0339ef22014-02-28 10:46:17 +08003797 fs_info->scrub_wr_completion_workers =
Jeff Mahoneycb001092016-06-09 16:22:11 -04003798 btrfs_alloc_workqueue(fs_info, "scrubwrc", flags,
Qu Wenruo0339ef22014-02-28 10:46:17 +08003799 max_active, 2);
Zhao Leie82afc52015-06-12 20:36:58 +08003800 if (!fs_info->scrub_wr_completion_workers)
3801 goto fail_scrub_wr_completion_workers;
3802
Qu Wenruo0339ef22014-02-28 10:46:17 +08003803 fs_info->scrub_nocow_workers =
Jeff Mahoneycb001092016-06-09 16:22:11 -04003804 btrfs_alloc_workqueue(fs_info, "scrubnc", flags, 1, 0);
Zhao Leie82afc52015-06-12 20:36:58 +08003805 if (!fs_info->scrub_nocow_workers)
3806 goto fail_scrub_nocow_workers;
Zhao Lei20b2e302015-06-04 20:09:15 +08003807 fs_info->scrub_parity_workers =
Jeff Mahoneycb001092016-06-09 16:22:11 -04003808 btrfs_alloc_workqueue(fs_info, "scrubparity", flags,
Zhao Lei20b2e302015-06-04 20:09:15 +08003809 max_active, 2);
Zhao Leie82afc52015-06-12 20:36:58 +08003810 if (!fs_info->scrub_parity_workers)
3811 goto fail_scrub_parity_workers;
Arne Jansen632dd772011-06-10 12:07:07 +02003812 }
Arne Jansena2de7332011-03-08 14:14:00 +01003813 ++fs_info->scrub_workers_refcnt;
Zhao Leie82afc52015-06-12 20:36:58 +08003814 return 0;
3815
3816fail_scrub_parity_workers:
3817 btrfs_destroy_workqueue(fs_info->scrub_nocow_workers);
3818fail_scrub_nocow_workers:
3819 btrfs_destroy_workqueue(fs_info->scrub_wr_completion_workers);
3820fail_scrub_wr_completion_workers:
3821 btrfs_destroy_workqueue(fs_info->scrub_workers);
3822fail_scrub_workers:
3823 return -ENOMEM;
Arne Jansena2de7332011-03-08 14:14:00 +01003824}
3825
Stefan Behrensaa1b8cd2012-11-05 17:03:39 +01003826static noinline_for_stack void scrub_workers_put(struct btrfs_fs_info *fs_info)
Arne Jansena2de7332011-03-08 14:14:00 +01003827{
Stefan Behrensff023aa2012-11-06 11:43:11 +01003828 if (--fs_info->scrub_workers_refcnt == 0) {
Qu Wenruo0339ef22014-02-28 10:46:17 +08003829 btrfs_destroy_workqueue(fs_info->scrub_workers);
3830 btrfs_destroy_workqueue(fs_info->scrub_wr_completion_workers);
3831 btrfs_destroy_workqueue(fs_info->scrub_nocow_workers);
Zhao Lei20b2e302015-06-04 20:09:15 +08003832 btrfs_destroy_workqueue(fs_info->scrub_parity_workers);
Stefan Behrensff023aa2012-11-06 11:43:11 +01003833 }
Arne Jansena2de7332011-03-08 14:14:00 +01003834 WARN_ON(fs_info->scrub_workers_refcnt < 0);
Arne Jansena2de7332011-03-08 14:14:00 +01003835}
3836
Stefan Behrensaa1b8cd2012-11-05 17:03:39 +01003837int btrfs_scrub_dev(struct btrfs_fs_info *fs_info, u64 devid, u64 start,
3838 u64 end, struct btrfs_scrub_progress *progress,
Stefan Behrens63a212a2012-11-05 18:29:28 +01003839 int readonly, int is_dev_replace)
Arne Jansena2de7332011-03-08 14:14:00 +01003840{
Stefan Behrensd9d181c2012-11-02 09:58:09 +01003841 struct scrub_ctx *sctx;
Arne Jansena2de7332011-03-08 14:14:00 +01003842 int ret;
3843 struct btrfs_device *dev;
Miao Xie5d68da32014-07-24 11:37:07 +08003844 struct rcu_string *name;
Arne Jansena2de7332011-03-08 14:14:00 +01003845
Stefan Behrensaa1b8cd2012-11-05 17:03:39 +01003846 if (btrfs_fs_closing(fs_info))
Arne Jansena2de7332011-03-08 14:14:00 +01003847 return -EINVAL;
3848
Stefan Behrensaa1b8cd2012-11-05 17:03:39 +01003849 if (fs_info->chunk_root->nodesize > BTRFS_STRIPE_LEN) {
Stefan Behrensb5d67f62012-03-27 14:21:27 -04003850 /*
3851	 * scrub, as implemented, cannot calculate checksums for nodes
3852	 * larger than BTRFS_STRIPE_LEN. Do not handle this situation
3853	 * at all because it won't ever happen.
3854 */
Frank Holtonefe120a2013-12-20 11:37:06 -05003855 btrfs_err(fs_info,
3856 "scrub: size assumption nodesize <= BTRFS_STRIPE_LEN (%d <= %d) fails",
Stefan Behrensaa1b8cd2012-11-05 17:03:39 +01003857 fs_info->chunk_root->nodesize, BTRFS_STRIPE_LEN);
Stefan Behrensb5d67f62012-03-27 14:21:27 -04003858 return -EINVAL;
3859 }
3860
        if (fs_info->chunk_root->sectorsize != PAGE_SIZE) {
                /* not supported for data w/o checksums */
                btrfs_err_rl(fs_info,
                             "scrub: size assumption sectorsize != PAGE_SIZE "
                             "(%d != %lu) fails",
                             fs_info->chunk_root->sectorsize, PAGE_SIZE);
                return -EINVAL;
        }

        if (fs_info->chunk_root->nodesize >
            PAGE_SIZE * SCRUB_MAX_PAGES_PER_BLOCK ||
            fs_info->chunk_root->sectorsize >
            PAGE_SIZE * SCRUB_MAX_PAGES_PER_BLOCK) {
                /*
                 * Larger values would exhaust the array bounds of the pagev
                 * member in struct scrub_block.
                 */
                btrfs_err(fs_info, "scrub: size assumption nodesize and sectorsize "
                          "<= SCRUB_MAX_PAGES_PER_BLOCK (%d <= %d && %d <= %d) fails",
                          fs_info->chunk_root->nodesize,
                          SCRUB_MAX_PAGES_PER_BLOCK,
                          fs_info->chunk_root->sectorsize,
                          SCRUB_MAX_PAGES_PER_BLOCK);
                return -EINVAL;
        }

        mutex_lock(&fs_info->fs_devices->device_list_mutex);
        dev = btrfs_find_device(fs_info, devid, NULL, NULL);
        if (!dev || (dev->missing && !is_dev_replace)) {
                mutex_unlock(&fs_info->fs_devices->device_list_mutex);
                return -ENODEV;
        }

        if (!is_dev_replace && !readonly && !dev->writeable) {
                mutex_unlock(&fs_info->fs_devices->device_list_mutex);
                rcu_read_lock();
                name = rcu_dereference(dev->name);
                btrfs_err(fs_info, "scrub: device %s is not writable",
                          name->str);
                rcu_read_unlock();
                return -EROFS;
        }

        mutex_lock(&fs_info->scrub_lock);
        if (!dev->in_fs_metadata || dev->is_tgtdev_for_dev_replace) {
                mutex_unlock(&fs_info->scrub_lock);
                mutex_unlock(&fs_info->fs_devices->device_list_mutex);
                return -EIO;
        }

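        /*
         * Only one scrub per device is allowed, and a plain scrub must not
         * run while a device replace is in progress; both cases back off
         * with -EINPROGRESS.
         */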
        btrfs_dev_replace_lock(&fs_info->dev_replace, 0);
        if (dev->scrub_device ||
            (!is_dev_replace &&
             btrfs_dev_replace_is_ongoing(&fs_info->dev_replace))) {
                btrfs_dev_replace_unlock(&fs_info->dev_replace, 0);
                mutex_unlock(&fs_info->scrub_lock);
                mutex_unlock(&fs_info->fs_devices->device_list_mutex);
                return -EINPROGRESS;
        }
        btrfs_dev_replace_unlock(&fs_info->dev_replace, 0);

        ret = scrub_workers_get(fs_info, is_dev_replace);
        if (ret) {
                mutex_unlock(&fs_info->scrub_lock);
                mutex_unlock(&fs_info->fs_devices->device_list_mutex);
                return ret;
        }

        sctx = scrub_setup_ctx(dev, is_dev_replace);
        if (IS_ERR(sctx)) {
                mutex_unlock(&fs_info->scrub_lock);
                mutex_unlock(&fs_info->fs_devices->device_list_mutex);
                scrub_workers_put(fs_info);
                return PTR_ERR(sctx);
        }
        sctx->readonly = readonly;
        dev->scrub_device = sctx;
        mutex_unlock(&fs_info->fs_devices->device_list_mutex);

        /*
         * Checking @scrub_pause_req here lets us avoid a race between a
         * committing transaction and the start of scrubbing.
         */
        __scrub_blocked_if_needed(fs_info);
        atomic_inc(&fs_info->scrubs_running);
        mutex_unlock(&fs_info->scrub_lock);

        if (!is_dev_replace) {
                /*
                 * Hold the device list mutex so that the super block writes
                 * kicked off by a log tree sync cannot race with us here.
                 */
                mutex_lock(&fs_info->fs_devices->device_list_mutex);
                ret = scrub_supers(sctx, dev);
                mutex_unlock(&fs_info->fs_devices->device_list_mutex);
        }

        if (!ret)
                ret = scrub_enumerate_chunks(sctx, dev, start, end,
                                             is_dev_replace);

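        /*
         * Tear down in two steps: first wait until all in-flight read bios
         * have completed and this scrub no longer counts as running, then
         * wait for the worker items that those bios may have queued.
         */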
        wait_event(sctx->list_wait, atomic_read(&sctx->bios_in_flight) == 0);
        atomic_dec(&fs_info->scrubs_running);
        wake_up(&fs_info->scrub_pause_wait);

        wait_event(sctx->list_wait, atomic_read(&sctx->workers_pending) == 0);

        if (progress)
                memcpy(progress, &sctx->stat, sizeof(*progress));

        mutex_lock(&fs_info->scrub_lock);
        dev->scrub_device = NULL;
        scrub_workers_put(fs_info);
        mutex_unlock(&fs_info->scrub_lock);

        scrub_put_ctx(sctx);

        return ret;
}

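/*
 * Ask all running scrubs to pause and wait until every one of them has
 * acknowledged the request; paired with btrfs_scrub_continue() below.
 */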
void btrfs_scrub_pause(struct btrfs_root *root)
{
        struct btrfs_fs_info *fs_info = root->fs_info;

        mutex_lock(&fs_info->scrub_lock);
        atomic_inc(&fs_info->scrub_pause_req);
        while (atomic_read(&fs_info->scrubs_paused) !=
               atomic_read(&fs_info->scrubs_running)) {
                mutex_unlock(&fs_info->scrub_lock);
                wait_event(fs_info->scrub_pause_wait,
                           atomic_read(&fs_info->scrubs_paused) ==
                           atomic_read(&fs_info->scrubs_running));
                mutex_lock(&fs_info->scrub_lock);
        }
        mutex_unlock(&fs_info->scrub_lock);
}

void btrfs_scrub_continue(struct btrfs_root *root)
{
        struct btrfs_fs_info *fs_info = root->fs_info;

        atomic_dec(&fs_info->scrub_pause_req);
        wake_up(&fs_info->scrub_pause_wait);
}

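/*
 * Cancel all scrubs running on this filesystem and wait for them to exit.
 */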
int btrfs_scrub_cancel(struct btrfs_fs_info *fs_info)
{
        mutex_lock(&fs_info->scrub_lock);
        if (!atomic_read(&fs_info->scrubs_running)) {
                mutex_unlock(&fs_info->scrub_lock);
                return -ENOTCONN;
        }

        atomic_inc(&fs_info->scrub_cancel_req);
        while (atomic_read(&fs_info->scrubs_running)) {
                mutex_unlock(&fs_info->scrub_lock);
                wait_event(fs_info->scrub_pause_wait,
                           atomic_read(&fs_info->scrubs_running) == 0);
                mutex_lock(&fs_info->scrub_lock);
        }
        atomic_dec(&fs_info->scrub_cancel_req);
        mutex_unlock(&fs_info->scrub_lock);

        return 0;
}

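/*
 * Cancel the scrub running on one particular device, waiting until the
 * device's scrub_ctx has been unregistered.
 */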
int btrfs_scrub_cancel_dev(struct btrfs_fs_info *fs_info,
                           struct btrfs_device *dev)
{
        struct scrub_ctx *sctx;

        mutex_lock(&fs_info->scrub_lock);
        sctx = dev->scrub_device;
        if (!sctx) {
                mutex_unlock(&fs_info->scrub_lock);
                return -ENOTCONN;
        }
        atomic_inc(&sctx->cancel_req);
        while (dev->scrub_device) {
                mutex_unlock(&fs_info->scrub_lock);
                wait_event(fs_info->scrub_pause_wait,
                           dev->scrub_device == NULL);
                mutex_lock(&fs_info->scrub_lock);
        }
        mutex_unlock(&fs_info->scrub_lock);

        return 0;
}

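/*
 * Report the progress counters of the scrub running on the given device,
 * if there is one.
 */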
int btrfs_scrub_progress(struct btrfs_root *root, u64 devid,
                         struct btrfs_scrub_progress *progress)
{
        struct btrfs_device *dev;
        struct scrub_ctx *sctx = NULL;

        mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
        dev = btrfs_find_device(root->fs_info, devid, NULL, NULL);
        if (dev)
                sctx = dev->scrub_device;
        if (sctx)
                memcpy(progress, &sctx->stat, sizeof(*progress));
        mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);

        return dev ? (sctx ? 0 : -ENOTCONN) : -ENODEV;
}

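/*
 * Map @extent_logical to the physical address, device and mirror number of
 * its first stripe (used when scrub runs as part of a device replace).  On
 * any mapping failure the out parameters are simply left untouched.
 */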
static void scrub_remap_extent(struct btrfs_fs_info *fs_info,
                               u64 extent_logical, u64 extent_len,
                               u64 *extent_physical,
                               struct btrfs_device **extent_dev,
                               int *extent_mirror_num)
{
        u64 mapped_length;
        struct btrfs_bio *bbio = NULL;
        int ret;

        mapped_length = extent_len;
        ret = btrfs_map_block(fs_info, READ, extent_logical,
                              &mapped_length, &bbio, 0);
        if (ret || !bbio || mapped_length < extent_len ||
            !bbio->stripes[0].dev->bdev) {
                btrfs_put_bbio(bbio);
                return;
        }

        *extent_physical = bbio->stripes[0].physical;
        *extent_mirror_num = bbio->mirror_num;
        *extent_dev = bbio->stripes[0].dev;
        btrfs_put_bbio(bbio);
}

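/*
 * Set up the write context of a scrub_ctx; a write target is only
 * configured when the scrub is the read half of a device replace.
 */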
static int scrub_setup_wr_ctx(struct scrub_ctx *sctx,
                              struct scrub_wr_ctx *wr_ctx,
                              struct btrfs_fs_info *fs_info,
                              struct btrfs_device *dev,
                              int is_dev_replace)
{
        WARN_ON(wr_ctx->wr_curr_bio != NULL);

        mutex_init(&wr_ctx->wr_lock);
        wr_ctx->wr_curr_bio = NULL;
        if (!is_dev_replace)
                return 0;

        WARN_ON(!dev->bdev);
        wr_ctx->pages_per_wr_bio = SCRUB_PAGES_PER_WR_BIO;
        wr_ctx->tgtdev = dev;
        atomic_set(&wr_ctx->flush_all_writes, 0);
        return 0;
}

static void scrub_free_wr_ctx(struct scrub_wr_ctx *wr_ctx)
{
        mutex_lock(&wr_ctx->wr_lock);
        kfree(wr_ctx->wr_curr_bio);
        wr_ctx->wr_curr_bio = NULL;
        mutex_unlock(&wr_ctx->wr_lock);
}

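/*
 * For extents that are not COWed (nodatacow), the on-disk data may change
 * while the copy to the dev-replace target is in flight, so the data is
 * copied through the page cache instead of via the plain scrub read path.
 * This only queues the work; copy_nocow_pages_worker() does the copying.
 */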
static int copy_nocow_pages(struct scrub_ctx *sctx, u64 logical, u64 len,
                            int mirror_num, u64 physical_for_dev_replace)
{
        struct scrub_copy_nocow_ctx *nocow_ctx;
        struct btrfs_fs_info *fs_info = sctx->dev_root->fs_info;

        nocow_ctx = kzalloc(sizeof(*nocow_ctx), GFP_NOFS);
        if (!nocow_ctx) {
                spin_lock(&sctx->stat_lock);
                sctx->stat.malloc_errors++;
                spin_unlock(&sctx->stat_lock);
                return -ENOMEM;
        }

        scrub_pending_trans_workers_inc(sctx);

        nocow_ctx->sctx = sctx;
        nocow_ctx->logical = logical;
        nocow_ctx->len = len;
        nocow_ctx->mirror_num = mirror_num;
        nocow_ctx->physical_for_dev_replace = physical_for_dev_replace;
        btrfs_init_work(&nocow_ctx->work, btrfs_scrubnc_helper,
                        copy_nocow_pages_worker, NULL, NULL);
        INIT_LIST_HEAD(&nocow_ctx->inodes);
        btrfs_queue_work(fs_info->scrub_nocow_workers,
                         &nocow_ctx->work);

        return 0;
}

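/*
 * Callback for iterate_inodes_from_logical(): remember each (inode, offset,
 * root) combination that references the extent being copied.
 */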
static int record_inode_for_nocow(u64 inum, u64 offset, u64 root, void *ctx)
{
        struct scrub_copy_nocow_ctx *nocow_ctx = ctx;
        struct scrub_nocow_inode *nocow_inode;

        nocow_inode = kzalloc(sizeof(*nocow_inode), GFP_NOFS);
        if (!nocow_inode)
                return -ENOMEM;
        nocow_inode->inum = inum;
        nocow_inode->offset = offset;
        nocow_inode->root = root;
        list_add_tail(&nocow_inode->list, &nocow_ctx->inodes);
        return 0;
}

#define COPY_COMPLETE 1

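/*
 * Worker for the nocow copy: joins a transaction, collects all inodes that
 * reference the logical extent and copies the pages inode by inode until
 * one pass reports COPY_COMPLETE or fails.  If nothing could be written,
 * the uncorrectable-read-error counter of the replace is bumped.
 */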
static void copy_nocow_pages_worker(struct btrfs_work *work)
{
        struct scrub_copy_nocow_ctx *nocow_ctx =
                container_of(work, struct scrub_copy_nocow_ctx, work);
        struct scrub_ctx *sctx = nocow_ctx->sctx;
        u64 logical = nocow_ctx->logical;
        u64 len = nocow_ctx->len;
        int mirror_num = nocow_ctx->mirror_num;
        u64 physical_for_dev_replace = nocow_ctx->physical_for_dev_replace;
        int ret;
        struct btrfs_trans_handle *trans = NULL;
        struct btrfs_fs_info *fs_info;
        struct btrfs_path *path;
        struct btrfs_root *root;
        int not_written = 0;

        fs_info = sctx->dev_root->fs_info;
        root = fs_info->extent_root;

        path = btrfs_alloc_path();
        if (!path) {
                spin_lock(&sctx->stat_lock);
                sctx->stat.malloc_errors++;
                spin_unlock(&sctx->stat_lock);
                not_written = 1;
                goto out;
        }

        trans = btrfs_join_transaction(root);
        if (IS_ERR(trans)) {
                not_written = 1;
                goto out;
        }

        ret = iterate_inodes_from_logical(logical, fs_info, path,
                                          record_inode_for_nocow, nocow_ctx);
        if (ret != 0 && ret != -ENOENT) {
                btrfs_warn(fs_info, "iterate_inodes_from_logical() failed: log %llu, "
                           "phys %llu, len %llu, mir %u, ret %d",
                           logical, physical_for_dev_replace, len, mirror_num,
                           ret);
                not_written = 1;
                goto out;
        }

        btrfs_end_transaction(trans, root);
        trans = NULL;
        while (!list_empty(&nocow_ctx->inodes)) {
                struct scrub_nocow_inode *entry;
                entry = list_first_entry(&nocow_ctx->inodes,
                                         struct scrub_nocow_inode,
                                         list);
                list_del_init(&entry->list);
                ret = copy_nocow_pages_for_inode(entry->inum, entry->offset,
                                                 entry->root, nocow_ctx);
                kfree(entry);
                if (ret == COPY_COMPLETE) {
                        ret = 0;
                        break;
                } else if (ret) {
                        break;
                }
        }
out:
        while (!list_empty(&nocow_ctx->inodes)) {
                struct scrub_nocow_inode *entry;
                entry = list_first_entry(&nocow_ctx->inodes,
                                         struct scrub_nocow_inode,
                                         list);
                list_del_init(&entry->list);
                kfree(entry);
        }
        if (trans && !IS_ERR(trans))
                btrfs_end_transaction(trans, root);
        if (not_written)
                btrfs_dev_replace_stats_inc(&fs_info->dev_replace.
                                            num_uncorrectable_read_errors);

        btrfs_free_path(path);
        kfree(nocow_ctx);

        scrub_pending_trans_workers_dec(sctx);
}

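/*
 * With the file range locked, check that the extent mapped at @start still
 * covers @logical.  Returns 0 when the copy may proceed, 1 when the range
 * must be skipped (an ordered extent is pending or the extent has moved),
 * or a negative errno.
 */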
static int check_extent_to_block(struct inode *inode, u64 start, u64 len,
                                 u64 logical)
{
        struct extent_state *cached_state = NULL;
        struct btrfs_ordered_extent *ordered;
        struct extent_io_tree *io_tree;
        struct extent_map *em;
        u64 lockstart = start, lockend = start + len - 1;
        int ret = 0;

        io_tree = &BTRFS_I(inode)->io_tree;

        lock_extent_bits(io_tree, lockstart, lockend, &cached_state);
        ordered = btrfs_lookup_ordered_range(inode, lockstart, len);
        if (ordered) {
                btrfs_put_ordered_extent(ordered);
                ret = 1;
                goto out_unlock;
        }

        em = btrfs_get_extent(inode, NULL, 0, start, len, 0);
        if (IS_ERR(em)) {
                ret = PTR_ERR(em);
                goto out_unlock;
        }

        /*
         * This extent does not actually cover the logical extent anymore,
         * move on to the next inode.
         */
        if (em->block_start > logical ||
            em->block_start + em->block_len < logical + len) {
                free_extent_map(em);
                ret = 1;
                goto out_unlock;
        }
        free_extent_map(em);

out_unlock:
        unlock_extent_cached(io_tree, lockstart, lockend, &cached_state,
                             GFP_NOFS);
        return ret;
}

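/*
 * Copy the data of the nocow extent for a single referencing inode from
 * the page cache to its physical_for_dev_replace offset on the target
 * device.
 */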
static int copy_nocow_pages_for_inode(u64 inum, u64 offset, u64 root,
                                      struct scrub_copy_nocow_ctx *nocow_ctx)
{
        struct btrfs_fs_info *fs_info = nocow_ctx->sctx->dev_root->fs_info;
        struct btrfs_key key;
        struct inode *inode;
        struct page *page;
        struct btrfs_root *local_root;
        struct extent_io_tree *io_tree;
        u64 physical_for_dev_replace;
        u64 nocow_ctx_logical;
        u64 len = nocow_ctx->len;
        unsigned long index;
        int srcu_index;
        int ret = 0;
        int err = 0;

        key.objectid = root;
        key.type = BTRFS_ROOT_ITEM_KEY;
        key.offset = (u64)-1;

        srcu_index = srcu_read_lock(&fs_info->subvol_srcu);

        local_root = btrfs_read_fs_root_no_name(fs_info, &key);
        if (IS_ERR(local_root)) {
                srcu_read_unlock(&fs_info->subvol_srcu, srcu_index);
                return PTR_ERR(local_root);
        }

        key.type = BTRFS_INODE_ITEM_KEY;
        key.objectid = inum;
        key.offset = 0;
        inode = btrfs_iget(fs_info->sb, &key, local_root, NULL);
        srcu_read_unlock(&fs_info->subvol_srcu, srcu_index);
        if (IS_ERR(inode))
                return PTR_ERR(inode);

        /* Avoid truncate/dio/punch hole... */
        inode_lock(inode);
        inode_dio_wait(inode);

        physical_for_dev_replace = nocow_ctx->physical_for_dev_replace;
        io_tree = &BTRFS_I(inode)->io_tree;
        nocow_ctx_logical = nocow_ctx->logical;

        ret = check_extent_to_block(inode, offset, len, nocow_ctx_logical);
        if (ret) {
                ret = ret > 0 ? 0 : ret;
                goto out;
        }

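        /*
         * Copy one page per iteration: grab the page (reading it in if it
         * is not uptodate), then re-check that it still belongs to this
         * mapping and that the extent still backs the logical range before
         * writing it to the target device.
         */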
        while (len >= PAGE_SIZE) {
                index = offset >> PAGE_SHIFT;
again:
                page = find_or_create_page(inode->i_mapping, index, GFP_NOFS);
                if (!page) {
                        btrfs_err(fs_info, "find_or_create_page() failed");
                        ret = -ENOMEM;
                        goto out;
                }

                if (PageUptodate(page)) {
                        if (PageDirty(page))
                                goto next_page;
                } else {
                        ClearPageError(page);
                        err = extent_read_full_page(io_tree, page,
                                                    btrfs_get_extent,
                                                    nocow_ctx->mirror_num);
                        if (err) {
                                ret = err;
                                goto next_page;
                        }

                        lock_page(page);
                        /*
                         * If the page has been removed from the page cache,
                         * its data is meaningless: it may be stale, and the
                         * new data may have been written into a new page in
                         * the page cache.
                         */
                        if (page->mapping != inode->i_mapping) {
                                unlock_page(page);
                                put_page(page);
                                goto again;
                        }
                        if (!PageUptodate(page)) {
                                ret = -EIO;
                                goto next_page;
                        }
                }

                ret = check_extent_to_block(inode, offset, len,
                                            nocow_ctx_logical);
                if (ret) {
                        ret = ret > 0 ? 0 : ret;
                        goto next_page;
                }

                err = write_page_nocow(nocow_ctx->sctx,
                                       physical_for_dev_replace, page);
                if (err)
                        ret = err;
next_page:
                unlock_page(page);
                put_page(page);

                if (ret)
                        break;

                offset += PAGE_SIZE;
                physical_for_dev_replace += PAGE_SIZE;
                nocow_ctx_logical += PAGE_SIZE;
                len -= PAGE_SIZE;
        }
        ret = COPY_COMPLETE;
out:
        inode_unlock(inode);
        iput(inode);
        return ret;
}

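/*
 * Synchronously write a single page to the given physical offset on the
 * dev-replace target device.
 */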
static int write_page_nocow(struct scrub_ctx *sctx,
                            u64 physical_for_dev_replace, struct page *page)
{
        struct bio *bio;
        struct btrfs_device *dev;
        int ret;

        dev = sctx->wr_ctx.tgtdev;
        if (!dev)
                return -EIO;
        if (!dev->bdev) {
                btrfs_warn_rl(dev->dev_root->fs_info,
                              "scrub write_page_nocow(bdev == NULL) is unexpected");
                return -EIO;
        }
        bio = btrfs_io_bio_alloc(GFP_NOFS, 1);
        if (!bio) {
                spin_lock(&sctx->stat_lock);
                sctx->stat.malloc_errors++;
                spin_unlock(&sctx->stat_lock);
                return -ENOMEM;
        }
        bio->bi_iter.bi_size = 0;
        bio->bi_iter.bi_sector = physical_for_dev_replace >> 9;
        bio->bi_bdev = dev->bdev;
        bio_set_op_attrs(bio, REQ_OP_WRITE, WRITE_SYNC);
        ret = bio_add_page(bio, page, PAGE_SIZE, 0);
        if (ret != PAGE_SIZE) {
leave_with_eio:
                bio_put(bio);
                btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_WRITE_ERRS);
                return -EIO;
        }

        if (btrfsic_submit_bio_wait(bio))
                goto leave_with_eio;

        bio_put(bio);
        return 0;
}