/*
 * Copyright (C) 2011, 2012 STRATO. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */

#include <linux/blkdev.h>
#include <linux/ratelimit.h>
#include "ctree.h"
#include "volumes.h"
#include "disk-io.h"
#include "ordered-data.h"
#include "transaction.h"
#include "backref.h"
#include "extent_io.h"
#include "check-integrity.h"
#include "rcu-string.h"

/*
 * This is only the first step towards a full-featured scrub. It reads all
 * extents and super blocks and verifies the checksums. In case a bad checksum
 * is found or the extent cannot be read, good data will be written back if
 * any can be found.
 *
 * Future enhancements:
 *  - In case an unrepairable extent is encountered, track which files are
 *    affected and report them
 *  - track and record media errors, throw out bad devices
 *  - add a mode to also read unallocated space
 */

struct scrub_block;
struct scrub_ctx;

#define SCRUB_PAGES_PER_BIO	16	/* 64k per bio */
#define SCRUB_BIOS_PER_CTX	16	/* 1 MB per device in flight */

/*
 * the following value times PAGE_SIZE needs to be large enough to match the
 * largest node/leaf/sector size that shall be supported.
 * Values larger than BTRFS_STRIPE_LEN are not supported.
 */
#define SCRUB_MAX_PAGES_PER_BLOCK	16	/* 64k per node/leaf/sector */

struct scrub_page {
	struct scrub_block *sblock;
	struct page *page;
	struct btrfs_device *dev;
	u64 flags;  /* extent flags */
	u64 generation;
	u64 logical;
	u64 physical;
	atomic_t ref_count;
	struct {
		unsigned int mirror_num:8;
		unsigned int have_csum:1;
		unsigned int io_error:1;
	};
	u8 csum[BTRFS_CSUM_SIZE];
};

struct scrub_bio {
	int index;
	struct scrub_ctx *sctx;
	struct btrfs_device *dev;
	struct bio *bio;
	int err;
	u64 logical;
	u64 physical;
	struct scrub_page *pagev[SCRUB_PAGES_PER_BIO];
	int page_count;
	int next_free;
	struct btrfs_work work;
};

struct scrub_block {
	struct scrub_page *pagev[SCRUB_MAX_PAGES_PER_BLOCK];
	int page_count;
	atomic_t outstanding_pages;
	atomic_t ref_count; /* free mem on transition to zero */
	struct scrub_ctx *sctx;
	struct {
		unsigned int header_error:1;
		unsigned int checksum_error:1;
		unsigned int no_io_error_seen:1;
		unsigned int generation_error:1; /* also sets header_error */
	};
};

struct scrub_ctx {
	struct scrub_bio *bios[SCRUB_BIOS_PER_CTX];
	struct btrfs_root *dev_root;
	int first_free;
	int curr;
	atomic_t bios_in_flight;
	atomic_t workers_pending;
	spinlock_t list_lock;
	wait_queue_head_t list_wait;
	u16 csum_size;
	struct list_head csum_list;
	atomic_t cancel_req;
	int readonly;
	int pages_per_bio; /* <= SCRUB_PAGES_PER_BIO */
	u32 sectorsize;
	u32 nodesize;
	u32 leafsize;
	/*
	 * statistics
	 */
	struct btrfs_scrub_progress stat;
	spinlock_t stat_lock;
};

struct scrub_fixup_nodatasum {
	struct scrub_ctx *sctx;
	struct btrfs_device *dev;
	u64 logical;
	struct btrfs_root *root;
	struct btrfs_work work;
	int mirror_num;
};

struct scrub_warning {
	struct btrfs_path *path;
	u64 extent_item_size;
	char *scratch_buf;
	char *msg_buf;
	const char *errstr;
	sector_t sector;
	u64 logical;
	struct btrfs_device *dev;
	int msg_bufsize;
	int scratch_bufsize;
};

static void scrub_pending_bio_inc(struct scrub_ctx *sctx);
static void scrub_pending_bio_dec(struct scrub_ctx *sctx);
static void scrub_pending_trans_workers_inc(struct scrub_ctx *sctx);
static void scrub_pending_trans_workers_dec(struct scrub_ctx *sctx);
static int scrub_handle_errored_block(struct scrub_block *sblock_to_check);
static int scrub_setup_recheck_block(struct scrub_ctx *sctx,
				     struct btrfs_fs_info *fs_info,
				     u64 length, u64 logical,
				     struct scrub_block *sblock);
static void scrub_recheck_block(struct btrfs_fs_info *fs_info,
				struct scrub_block *sblock, int is_metadata,
				int have_csum, u8 *csum, u64 generation,
				u16 csum_size);
static void scrub_recheck_block_checksum(struct btrfs_fs_info *fs_info,
					 struct scrub_block *sblock,
					 int is_metadata, int have_csum,
					 const u8 *csum, u64 generation,
					 u16 csum_size);
static void scrub_complete_bio_end_io(struct bio *bio, int err);
static int scrub_repair_block_from_good_copy(struct scrub_block *sblock_bad,
					     struct scrub_block *sblock_good,
					     int force_write);
static int scrub_repair_page_from_good_copy(struct scrub_block *sblock_bad,
					    struct scrub_block *sblock_good,
					    int page_num, int force_write);
static int scrub_checksum_data(struct scrub_block *sblock);
static int scrub_checksum_tree_block(struct scrub_block *sblock);
static int scrub_checksum_super(struct scrub_block *sblock);
static void scrub_block_get(struct scrub_block *sblock);
static void scrub_block_put(struct scrub_block *sblock);
static void scrub_page_get(struct scrub_page *spage);
static void scrub_page_put(struct scrub_page *spage);
static int scrub_add_page_to_bio(struct scrub_ctx *sctx,
				 struct scrub_page *spage);
static int scrub_pages(struct scrub_ctx *sctx, u64 logical, u64 len,
		       u64 physical, struct btrfs_device *dev, u64 flags,
		       u64 gen, int mirror_num, u8 *csum, int force);
static void scrub_bio_end_io(struct bio *bio, int err);
static void scrub_bio_end_io_worker(struct btrfs_work *work);
static void scrub_block_complete(struct scrub_block *sblock);


static void scrub_pending_bio_inc(struct scrub_ctx *sctx)
{
	atomic_inc(&sctx->bios_in_flight);
}

static void scrub_pending_bio_dec(struct scrub_ctx *sctx)
{
	atomic_dec(&sctx->bios_in_flight);
	wake_up(&sctx->list_wait);
}

/*
 * used for workers that require transaction commits (i.e., for the
 * NOCOW case)
 */
static void scrub_pending_trans_workers_inc(struct scrub_ctx *sctx)
{
	struct btrfs_fs_info *fs_info = sctx->dev_root->fs_info;

	/*
	 * increment scrubs_running to prevent cancel requests from
	 * completing as long as a worker is running. we must also
	 * increment scrubs_paused to prevent deadlocking on pause
	 * requests used for transaction commits (as the worker uses a
	 * transaction context). it is safe to regard the worker
	 * as paused for all practical matters. effectively, we only
	 * avoid cancellation requests from completing.
	 */
	mutex_lock(&fs_info->scrub_lock);
	atomic_inc(&fs_info->scrubs_running);
	atomic_inc(&fs_info->scrubs_paused);
	mutex_unlock(&fs_info->scrub_lock);
	atomic_inc(&sctx->workers_pending);
}

/* used for workers that require transaction commits */
static void scrub_pending_trans_workers_dec(struct scrub_ctx *sctx)
{
	struct btrfs_fs_info *fs_info = sctx->dev_root->fs_info;

	/*
	 * see scrub_pending_trans_workers_inc() for why we're pretending
	 * to be paused in the scrub counters
	 */
	mutex_lock(&fs_info->scrub_lock);
	atomic_dec(&fs_info->scrubs_running);
	atomic_dec(&fs_info->scrubs_paused);
	mutex_unlock(&fs_info->scrub_lock);
	atomic_dec(&sctx->workers_pending);
	wake_up(&fs_info->scrub_pause_wait);
	wake_up(&sctx->list_wait);
}

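/* release the checksums that are still queued on the scrub context */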
static void scrub_free_csums(struct scrub_ctx *sctx)
{
	while (!list_empty(&sctx->csum_list)) {
		struct btrfs_ordered_sum *sum;
		sum = list_first_entry(&sctx->csum_list,
				       struct btrfs_ordered_sum, list);
		list_del(&sum->list);
		kfree(sum);
	}
}

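/*
 * free a scrub context: drop the block references still held by a
 * partially filled bio, then free the per-context bios, the queued
 * checksums and the context itself
 */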
static noinline_for_stack void scrub_free_ctx(struct scrub_ctx *sctx)
{
	int i;

	if (!sctx)
		return;

	/* this can happen when scrub is cancelled */
	if (sctx->curr != -1) {
		struct scrub_bio *sbio = sctx->bios[sctx->curr];

		for (i = 0; i < sbio->page_count; i++) {
			BUG_ON(!sbio->pagev[i]);
			BUG_ON(!sbio->pagev[i]->page);
			scrub_block_put(sbio->pagev[i]->sblock);
		}
		bio_put(sbio->bio);
	}

	for (i = 0; i < SCRUB_BIOS_PER_CTX; ++i) {
		struct scrub_bio *sbio = sctx->bios[i];

		if (!sbio)
			break;
		kfree(sbio);
	}

	scrub_free_csums(sctx);
	kfree(sctx);
}

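/*
 * allocate and initialize a scrub context for one device: pre-allocate
 * SCRUB_BIOS_PER_CTX scrub_bios, chain them into a free list and take
 * the node/leaf/sector sizes from the device root
 */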
static noinline_for_stack
struct scrub_ctx *scrub_setup_ctx(struct btrfs_device *dev)
{
	struct scrub_ctx *sctx;
	int i;
	struct btrfs_fs_info *fs_info = dev->dev_root->fs_info;
	int pages_per_bio;

	pages_per_bio = min_t(int, SCRUB_PAGES_PER_BIO,
			      bio_get_nr_vecs(dev->bdev));
	sctx = kzalloc(sizeof(*sctx), GFP_NOFS);
	if (!sctx)
		goto nomem;
	sctx->pages_per_bio = pages_per_bio;
	sctx->curr = -1;
	sctx->dev_root = dev->dev_root;
	for (i = 0; i < SCRUB_BIOS_PER_CTX; ++i) {
		struct scrub_bio *sbio;

		sbio = kzalloc(sizeof(*sbio), GFP_NOFS);
		if (!sbio)
			goto nomem;
		sctx->bios[i] = sbio;

		sbio->index = i;
		sbio->sctx = sctx;
		sbio->page_count = 0;
		sbio->work.func = scrub_bio_end_io_worker;

		if (i != SCRUB_BIOS_PER_CTX - 1)
			sctx->bios[i]->next_free = i + 1;
		else
			sctx->bios[i]->next_free = -1;
	}
	sctx->first_free = 0;
	sctx->nodesize = dev->dev_root->nodesize;
	sctx->leafsize = dev->dev_root->leafsize;
	sctx->sectorsize = dev->dev_root->sectorsize;
	atomic_set(&sctx->bios_in_flight, 0);
	atomic_set(&sctx->workers_pending, 0);
	atomic_set(&sctx->cancel_req, 0);
	sctx->csum_size = btrfs_super_csum_size(fs_info->super_copy);
	INIT_LIST_HEAD(&sctx->csum_list);

	spin_lock_init(&sctx->list_lock);
	spin_lock_init(&sctx->stat_lock);
	init_waitqueue_head(&sctx->list_wait);
	return sctx;

nomem:
	scrub_free_ctx(sctx);
	return ERR_PTR(-ENOMEM);
}

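/*
 * backref walker callback: resolve one inode that references the bad
 * extent and print a warning naming the affected file paths
 */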
static int scrub_print_warning_inode(u64 inum, u64 offset, u64 root, void *ctx)
{
	u64 isize;
	u32 nlink;
	int ret;
	int i;
	struct extent_buffer *eb;
	struct btrfs_inode_item *inode_item;
	struct scrub_warning *swarn = ctx;
	struct btrfs_fs_info *fs_info = swarn->dev->dev_root->fs_info;
	struct inode_fs_paths *ipath = NULL;
	struct btrfs_root *local_root;
	struct btrfs_key root_key;

	root_key.objectid = root;
	root_key.type = BTRFS_ROOT_ITEM_KEY;
	root_key.offset = (u64)-1;
	local_root = btrfs_read_fs_root_no_name(fs_info, &root_key);
	if (IS_ERR(local_root)) {
		ret = PTR_ERR(local_root);
		goto err;
	}

	ret = inode_item_info(inum, 0, local_root, swarn->path);
	if (ret) {
		btrfs_release_path(swarn->path);
		goto err;
	}

	eb = swarn->path->nodes[0];
	inode_item = btrfs_item_ptr(eb, swarn->path->slots[0],
				    struct btrfs_inode_item);
	isize = btrfs_inode_size(eb, inode_item);
	nlink = btrfs_inode_nlink(eb, inode_item);
	btrfs_release_path(swarn->path);

	ipath = init_ipath(4096, local_root, swarn->path);
	if (IS_ERR(ipath)) {
		ret = PTR_ERR(ipath);
		ipath = NULL;
		goto err;
	}
	ret = paths_from_inode(inum, ipath);

	if (ret < 0)
		goto err;

	/*
	 * we deliberately ignore the fact that ipath might have been too
	 * small to hold all of the paths here
	 */
	for (i = 0; i < ipath->fspath->elem_cnt; ++i)
		printk_in_rcu(KERN_WARNING "btrfs: %s at logical %llu on dev "
			"%s, sector %llu, root %llu, inode %llu, offset %llu, "
			"length %llu, links %u (path: %s)\n", swarn->errstr,
			swarn->logical, rcu_str_deref(swarn->dev->name),
			(unsigned long long)swarn->sector, root, inum, offset,
			min(isize - offset, (u64)PAGE_SIZE), nlink,
			(char *)(unsigned long)ipath->fspath->val[i]);

	free_ipath(ipath);
	return 0;

err:
	printk_in_rcu(KERN_WARNING "btrfs: %s at logical %llu on dev "
		"%s, sector %llu, root %llu, inode %llu, offset %llu: path "
		"resolving failed with ret=%d\n", swarn->errstr,
		swarn->logical, rcu_str_deref(swarn->dev->name),
		(unsigned long long)swarn->sector, root, inum, offset, ret);

	free_ipath(ipath);
	return 0;
}

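/*
 * warn about an errored block: for tree blocks the owning trees are
 * reported via the backrefs, for data extents the referencing inodes
 * and file paths are resolved and printed
 */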
static void scrub_print_warning(const char *errstr, struct scrub_block *sblock)
{
	struct btrfs_device *dev;
	struct btrfs_fs_info *fs_info;
	struct btrfs_path *path;
	struct btrfs_key found_key;
	struct extent_buffer *eb;
	struct btrfs_extent_item *ei;
	struct scrub_warning swarn;
	unsigned long ptr = 0;
	u64 extent_item_pos;
	u64 flags = 0;
	u64 ref_root;
	u32 item_size;
	u8 ref_level;
	const int bufsize = 4096;
	int ret;

	WARN_ON(sblock->page_count < 1);
	dev = sblock->pagev[0]->dev;
	fs_info = sblock->sctx->dev_root->fs_info;

	path = btrfs_alloc_path();

	swarn.scratch_buf = kmalloc(bufsize, GFP_NOFS);
	swarn.msg_buf = kmalloc(bufsize, GFP_NOFS);
	swarn.sector = (sblock->pagev[0]->physical) >> 9;
	swarn.logical = sblock->pagev[0]->logical;
	swarn.errstr = errstr;
	swarn.dev = NULL;
	swarn.msg_bufsize = bufsize;
	swarn.scratch_bufsize = bufsize;

	if (!path || !swarn.scratch_buf || !swarn.msg_buf)
		goto out;

	ret = extent_from_logical(fs_info, swarn.logical, path, &found_key,
				  &flags);
	if (ret < 0)
		goto out;

	extent_item_pos = swarn.logical - found_key.objectid;
	swarn.extent_item_size = found_key.offset;

	eb = path->nodes[0];
	ei = btrfs_item_ptr(eb, path->slots[0], struct btrfs_extent_item);
	item_size = btrfs_item_size_nr(eb, path->slots[0]);
	btrfs_release_path(path);

	if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
		do {
			ret = tree_backref_for_extent(&ptr, eb, ei, item_size,
						      &ref_root, &ref_level);
			printk_in_rcu(KERN_WARNING
				"btrfs: %s at logical %llu on dev %s, "
				"sector %llu: metadata %s (level %d) in tree "
				"%llu\n", errstr, swarn.logical,
				rcu_str_deref(dev->name),
				(unsigned long long)swarn.sector,
				ref_level ? "node" : "leaf",
				ret < 0 ? -1 : ref_level,
				ret < 0 ? -1 : ref_root);
		} while (ret != 1);
	} else {
		swarn.path = path;
		swarn.dev = dev;
		iterate_extent_inodes(fs_info, found_key.objectid,
				      extent_item_pos, 1,
				      scrub_print_warning_inode, &swarn);
	}

out:
	btrfs_free_path(path);
	kfree(swarn.scratch_buf);
	kfree(swarn.msg_buf);
}

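/*
 * called for every inode that references the unreadable nodatasum
 * sector: either rewrite a clean, up-to-date page in place or force a
 * read of the bad mirror so that the generic read path repairs the
 * sector; returns 1 to stop the inode iteration once it succeeded
 */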
static int scrub_fixup_readpage(u64 inum, u64 offset, u64 root, void *ctx)
{
	struct page *page = NULL;
	unsigned long index;
	struct scrub_fixup_nodatasum *fixup = ctx;
	int ret;
	int corrected = 0;
	struct btrfs_key key;
	struct inode *inode = NULL;
	u64 end = offset + PAGE_SIZE - 1;
	struct btrfs_root *local_root;

	key.objectid = root;
	key.type = BTRFS_ROOT_ITEM_KEY;
	key.offset = (u64)-1;
	local_root = btrfs_read_fs_root_no_name(fixup->root->fs_info, &key);
	if (IS_ERR(local_root))
		return PTR_ERR(local_root);

	key.type = BTRFS_INODE_ITEM_KEY;
	key.objectid = inum;
	key.offset = 0;
	inode = btrfs_iget(fixup->root->fs_info->sb, &key, local_root, NULL);
	if (IS_ERR(inode))
		return PTR_ERR(inode);

	index = offset >> PAGE_CACHE_SHIFT;

	page = find_or_create_page(inode->i_mapping, index, GFP_NOFS);
	if (!page) {
		ret = -ENOMEM;
		goto out;
	}

	if (PageUptodate(page)) {
		struct btrfs_fs_info *fs_info;
		if (PageDirty(page)) {
			/*
			 * we need to write the data to the defect sector. the
			 * data that was in that sector is not in memory,
			 * because the page was modified. we must not write the
			 * modified page to that sector.
			 *
			 * TODO: what could be done here: wait for the delalloc
			 *       runner to write out that page (might involve
			 *       COW) and see whether the sector is still
			 *       referenced afterwards.
			 *
			 * For the time being, we'll treat this error as
			 * uncorrectable, although there is a chance that a
			 * later scrub will find the bad sector again and that
			 * there will be no dirty page in memory by then.
			 */
			ret = -EIO;
			goto out;
		}
		fs_info = BTRFS_I(inode)->root->fs_info;
		ret = repair_io_failure(fs_info, offset, PAGE_SIZE,
					fixup->logical, page,
					fixup->mirror_num);
		unlock_page(page);
		corrected = !ret;
	} else {
		/*
		 * we need to get good data first. the general readpage path
		 * will call repair_io_failure for us, we just have to make
		 * sure we read the bad mirror.
		 */
		ret = set_extent_bits(&BTRFS_I(inode)->io_tree, offset, end,
				      EXTENT_DAMAGED, GFP_NOFS);
		if (ret) {
			/* set_extent_bits should give proper error */
			WARN_ON(ret > 0);
			if (ret > 0)
				ret = -EFAULT;
			goto out;
		}

		ret = extent_read_full_page(&BTRFS_I(inode)->io_tree, page,
					    btrfs_get_extent,
					    fixup->mirror_num);
		wait_on_page_locked(page);

		corrected = !test_range_bit(&BTRFS_I(inode)->io_tree, offset,
					    end, EXTENT_DAMAGED, 0, NULL);
		if (!corrected)
			clear_extent_bits(&BTRFS_I(inode)->io_tree, offset, end,
					  EXTENT_DAMAGED, GFP_NOFS);
	}

out:
	if (page)
		put_page(page);
	if (inode)
		iput(inode);

	if (ret < 0)
		return ret;

	if (ret == 0 && corrected) {
		/*
		 * we only need to call readpage for one of the inodes belonging
		 * to this extent. so make iterate_extent_inodes stop
		 */
		return 1;
	}

	return -EIO;
}

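/*
 * worker that repairs an unreadable sector of a nodatasum extent: it
 * joins a transaction and triggers a regular read of the failed mirror
 * through the page cache, relying on the on-the-fly error correction to
 * rewrite the sector, and updates the scrub statistics accordingly
 */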
static void scrub_fixup_nodatasum(struct btrfs_work *work)
{
	int ret;
	struct scrub_fixup_nodatasum *fixup;
	struct scrub_ctx *sctx;
	struct btrfs_trans_handle *trans = NULL;
	struct btrfs_fs_info *fs_info;
	struct btrfs_path *path;
	int uncorrectable = 0;

	fixup = container_of(work, struct scrub_fixup_nodatasum, work);
	sctx = fixup->sctx;
	fs_info = fixup->root->fs_info;

	path = btrfs_alloc_path();
	if (!path) {
		spin_lock(&sctx->stat_lock);
		++sctx->stat.malloc_errors;
		spin_unlock(&sctx->stat_lock);
		uncorrectable = 1;
		goto out;
	}

	trans = btrfs_join_transaction(fixup->root);
	if (IS_ERR(trans)) {
		uncorrectable = 1;
		goto out;
	}

	/*
	 * the idea is to trigger a regular read through the standard path. we
	 * read a page from the (failed) logical address by specifying the
	 * corresponding copynum of the failed sector. thus, that readpage is
	 * expected to fail.
	 * that is the point where on-the-fly error correction will kick in
	 * (once it's finished) and rewrite the failed sector if a good copy
	 * can be found.
	 */
	ret = iterate_inodes_from_logical(fixup->logical, fixup->root->fs_info,
					  path, scrub_fixup_readpage,
					  fixup);
	if (ret < 0) {
		uncorrectable = 1;
		goto out;
	}
	WARN_ON(ret != 1);

	spin_lock(&sctx->stat_lock);
	++sctx->stat.corrected_errors;
	spin_unlock(&sctx->stat_lock);

out:
	if (trans && !IS_ERR(trans))
		btrfs_end_transaction(trans, fixup->root);
	if (uncorrectable) {
		spin_lock(&sctx->stat_lock);
		++sctx->stat.uncorrectable_errors;
		spin_unlock(&sctx->stat_lock);

		printk_ratelimited_in_rcu(KERN_ERR
			"btrfs: unable to fixup (nodatasum) error at logical %llu on dev %s\n",
			(unsigned long long)fixup->logical,
			rcu_str_deref(fixup->dev->name));
	}

	btrfs_free_path(path);
	kfree(fixup);

	scrub_pending_trans_workers_dec(sctx);
}

/*
 * scrub_handle_errored_block gets called when either verification of the
 * pages failed or the bio failed to read, e.g. with EIO. In the latter
 * case, this function handles all pages in the bio, even though only one
 * may be bad.
 * The goal of this function is to repair the errored block by using the
 * contents of one of the mirrors.
 */
static int scrub_handle_errored_block(struct scrub_block *sblock_to_check)
{
	struct scrub_ctx *sctx = sblock_to_check->sctx;
	struct btrfs_device *dev;
	struct btrfs_fs_info *fs_info;
	u64 length;
	u64 logical;
	u64 generation;
	unsigned int failed_mirror_index;
	unsigned int is_metadata;
	unsigned int have_csum;
	u8 *csum;
	struct scrub_block *sblocks_for_recheck; /* holds one for each mirror */
	struct scrub_block *sblock_bad;
	int ret;
	int mirror_index;
	int page_num;
	int success;
	static DEFINE_RATELIMIT_STATE(_rs, DEFAULT_RATELIMIT_INTERVAL,
				      DEFAULT_RATELIMIT_BURST);

	BUG_ON(sblock_to_check->page_count < 1);
	fs_info = sctx->dev_root->fs_info;
	length = sblock_to_check->page_count * PAGE_SIZE;
	logical = sblock_to_check->pagev[0]->logical;
	generation = sblock_to_check->pagev[0]->generation;
	BUG_ON(sblock_to_check->pagev[0]->mirror_num < 1);
	failed_mirror_index = sblock_to_check->pagev[0]->mirror_num - 1;
	is_metadata = !(sblock_to_check->pagev[0]->flags &
			BTRFS_EXTENT_FLAG_DATA);
	have_csum = sblock_to_check->pagev[0]->have_csum;
	csum = sblock_to_check->pagev[0]->csum;
	dev = sblock_to_check->pagev[0]->dev;

	/*
	 * read all mirrors one after the other. This includes re-reading
	 * the extent or metadata block that failed (the one that caused
	 * this fixup code to be called), this time page by page, in order
	 * to know which pages caused I/O errors and which ones are good
	 * (for all mirrors).
	 * The goal is to handle the situation when more than one
	 * mirror contains I/O errors, but the errors do not
	 * overlap, i.e. the data can be repaired by selecting the
	 * pages from those mirrors without I/O error on the
	 * particular pages. One example (with blocks >= 2 * PAGE_SIZE)
	 * would be that mirror #1 has an I/O error on the first page,
	 * the second page is good, and mirror #2 has an I/O error on
	 * the second page, but the first page is good.
	 * Then the first page of the first mirror can be repaired by
	 * taking the first page of the second mirror, and the
	 * second page of the second mirror can be repaired by
	 * copying the contents of the 2nd page of the 1st mirror.
	 * One more note: if the pages of one mirror contain I/O
	 * errors, the checksum cannot be verified. In order to get
	 * the best data for repairing, the first attempt is to find
	 * a mirror without I/O errors and with a validated checksum.
	 * Only if this is not possible are the pages picked from
	 * mirrors with I/O errors, without considering the checksum.
	 * If the latter is the case, at the end, the checksum of the
	 * repaired area is verified in order to correctly maintain
	 * the statistics.
	 */

	sblocks_for_recheck = kzalloc(BTRFS_MAX_MIRRORS *
				     sizeof(*sblocks_for_recheck),
				     GFP_NOFS);
	if (!sblocks_for_recheck) {
		spin_lock(&sctx->stat_lock);
		sctx->stat.malloc_errors++;
		sctx->stat.read_errors++;
		sctx->stat.uncorrectable_errors++;
		spin_unlock(&sctx->stat_lock);
		btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_READ_ERRS);
		goto out;
	}

	/* setup the context, map the logical blocks and alloc the pages */
	ret = scrub_setup_recheck_block(sctx, fs_info, length,
					logical, sblocks_for_recheck);
	if (ret) {
		spin_lock(&sctx->stat_lock);
		sctx->stat.read_errors++;
		sctx->stat.uncorrectable_errors++;
		spin_unlock(&sctx->stat_lock);
		btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_READ_ERRS);
		goto out;
	}
	BUG_ON(failed_mirror_index >= BTRFS_MAX_MIRRORS);
	sblock_bad = sblocks_for_recheck + failed_mirror_index;

	/* build and submit the bios for the failed mirror, check checksums */
	scrub_recheck_block(fs_info, sblock_bad, is_metadata, have_csum,
			    csum, generation, sctx->csum_size);

	if (!sblock_bad->header_error && !sblock_bad->checksum_error &&
	    sblock_bad->no_io_error_seen) {
		/*
		 * the error disappeared after reading page by page, or
		 * the area was part of a huge bio and other parts of the
		 * bio caused I/O errors, or the block layer merged several
		 * read requests into one and the error is caused by a
		 * different bio (usually one of the two latter cases is
		 * the cause)
		 */
		spin_lock(&sctx->stat_lock);
		sctx->stat.unverified_errors++;
		spin_unlock(&sctx->stat_lock);

		goto out;
	}

	if (!sblock_bad->no_io_error_seen) {
		spin_lock(&sctx->stat_lock);
		sctx->stat.read_errors++;
		spin_unlock(&sctx->stat_lock);
		if (__ratelimit(&_rs))
			scrub_print_warning("i/o error", sblock_to_check);
		btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_READ_ERRS);
	} else if (sblock_bad->checksum_error) {
		spin_lock(&sctx->stat_lock);
		sctx->stat.csum_errors++;
		spin_unlock(&sctx->stat_lock);
		if (__ratelimit(&_rs))
			scrub_print_warning("checksum error", sblock_to_check);
		btrfs_dev_stat_inc_and_print(dev,
					     BTRFS_DEV_STAT_CORRUPTION_ERRS);
	} else if (sblock_bad->header_error) {
		spin_lock(&sctx->stat_lock);
		sctx->stat.verify_errors++;
		spin_unlock(&sctx->stat_lock);
		if (__ratelimit(&_rs))
			scrub_print_warning("checksum/header error",
					    sblock_to_check);
		if (sblock_bad->generation_error)
			btrfs_dev_stat_inc_and_print(dev,
				BTRFS_DEV_STAT_GENERATION_ERRS);
		else
			btrfs_dev_stat_inc_and_print(dev,
				BTRFS_DEV_STAT_CORRUPTION_ERRS);
	}

	if (sctx->readonly)
		goto did_not_correct_error;

	if (!is_metadata && !have_csum) {
		struct scrub_fixup_nodatasum *fixup_nodatasum;

		/*
		 * !is_metadata and !have_csum, this means that the data
		 * might not be COW'ed, that it might be modified
		 * concurrently. The general strategy to work on the
		 * commit root does not help in the case when COW is not
		 * used.
		 */
		fixup_nodatasum = kzalloc(sizeof(*fixup_nodatasum), GFP_NOFS);
		if (!fixup_nodatasum)
			goto did_not_correct_error;
		fixup_nodatasum->sctx = sctx;
		fixup_nodatasum->dev = dev;
		fixup_nodatasum->logical = logical;
		fixup_nodatasum->root = fs_info->extent_root;
		fixup_nodatasum->mirror_num = failed_mirror_index + 1;
		scrub_pending_trans_workers_inc(sctx);
		fixup_nodatasum->work.func = scrub_fixup_nodatasum;
		btrfs_queue_worker(&fs_info->scrub_workers,
				   &fixup_nodatasum->work);
		goto out;
	}

	/*
	 * now build and submit the bios for the other mirrors, check
	 * checksums.
	 * First try to pick the mirror which is completely without I/O
	 * errors and also does not have a checksum error.
	 * If one is found, and if a checksum is present, the full block
	 * that is known to contain an error is rewritten. Afterwards
	 * the block is known to be corrected.
	 * If a mirror is found which is completely correct, and no
	 * checksum is present, only those pages are rewritten that had
	 * an I/O error in the block to be repaired, since it cannot be
	 * determined which copy of the other pages is better (and it
	 * could happen otherwise that a correct page would be
	 * overwritten by a bad one).
	 */
	for (mirror_index = 0;
	     mirror_index < BTRFS_MAX_MIRRORS &&
	     sblocks_for_recheck[mirror_index].page_count > 0;
	     mirror_index++) {
		struct scrub_block *sblock_other;

		if (mirror_index == failed_mirror_index)
			continue;
		sblock_other = sblocks_for_recheck + mirror_index;

		/* build and submit the bios, check checksums */
		scrub_recheck_block(fs_info, sblock_other, is_metadata,
				    have_csum, csum, generation,
				    sctx->csum_size);

		if (!sblock_other->header_error &&
		    !sblock_other->checksum_error &&
		    sblock_other->no_io_error_seen) {
			int force_write = is_metadata || have_csum;

			ret = scrub_repair_block_from_good_copy(sblock_bad,
								sblock_other,
								force_write);
			if (0 == ret)
				goto corrected_error;
		}
	}

	/*
	 * in case of I/O errors in the area that is supposed to be
	 * repaired, continue by picking good copies of those pages.
	 * Select the good pages from mirrors to rewrite bad pages from
	 * the area to fix. Afterwards verify the checksum of the block
	 * that is supposed to be repaired. This verification step is
	 * only done for the purpose of statistics counting and for the
	 * final scrub report, whether errors remain.
	 * A perfect algorithm could make use of the checksum and try
	 * all possible combinations of pages from the different mirrors
	 * until the checksum verification succeeds. For example, when
	 * the 2nd page of mirror #1 faces I/O errors, and the 2nd page
	 * of mirror #2 is readable but the final checksum test fails,
	 * then the 2nd page of mirror #3 could be tried to see whether
	 * the final checksum succeeds. But this would be a rare
	 * exception and is therefore not implemented. At least it is
	 * avoided that the good copy is overwritten.
	 * A more useful improvement would be to pick the sectors
	 * without I/O error based on sector sizes (512 bytes on legacy
	 * disks) instead of on PAGE_SIZE. Then maybe 512 bytes of one
	 * mirror could be repaired by taking 512 bytes of a different
	 * mirror, even if other 512 byte sectors in the same PAGE_SIZE
	 * area are unreadable.
	 */

	/* can only fix I/O errors from here on */
	if (sblock_bad->no_io_error_seen)
		goto did_not_correct_error;

	success = 1;
	for (page_num = 0; page_num < sblock_bad->page_count; page_num++) {
		struct scrub_page *page_bad = sblock_bad->pagev[page_num];

		if (!page_bad->io_error)
			continue;

		for (mirror_index = 0;
		     mirror_index < BTRFS_MAX_MIRRORS &&
		     sblocks_for_recheck[mirror_index].page_count > 0;
		     mirror_index++) {
			struct scrub_block *sblock_other = sblocks_for_recheck +
							   mirror_index;
			struct scrub_page *page_other = sblock_other->pagev[
							page_num];

			if (!page_other->io_error) {
				ret = scrub_repair_page_from_good_copy(
					sblock_bad, sblock_other, page_num, 0);
				if (0 == ret) {
					page_bad->io_error = 0;
					break; /* succeeded for this page */
				}
			}
		}

		if (page_bad->io_error) {
			/* did not find a mirror to copy the page from */
			success = 0;
		}
	}

	if (success) {
		if (is_metadata || have_csum) {
			/*
			 * need to verify the checksum now that all
			 * sectors on disk are repaired (the write
			 * request for data to be repaired is on its way).
			 * Just be lazy and use scrub_recheck_block()
			 * which re-reads the data before the checksum
			 * is verified, but most likely the data comes out
			 * of the page cache.
			 */
			scrub_recheck_block(fs_info, sblock_bad,
					    is_metadata, have_csum, csum,
					    generation, sctx->csum_size);
			if (!sblock_bad->header_error &&
			    !sblock_bad->checksum_error &&
			    sblock_bad->no_io_error_seen)
				goto corrected_error;
			else
				goto did_not_correct_error;
		} else {
corrected_error:
			spin_lock(&sctx->stat_lock);
			sctx->stat.corrected_errors++;
			spin_unlock(&sctx->stat_lock);
			printk_ratelimited_in_rcu(KERN_ERR
				"btrfs: fixed up error at logical %llu on dev %s\n",
				(unsigned long long)logical,
				rcu_str_deref(dev->name));
		}
	} else {
did_not_correct_error:
		spin_lock(&sctx->stat_lock);
		sctx->stat.uncorrectable_errors++;
		spin_unlock(&sctx->stat_lock);
		printk_ratelimited_in_rcu(KERN_ERR
			"btrfs: unable to fixup (regular) error at logical %llu on dev %s\n",
			(unsigned long long)logical,
			rcu_str_deref(dev->name));
	}

out:
	if (sblocks_for_recheck) {
		for (mirror_index = 0; mirror_index < BTRFS_MAX_MIRRORS;
		     mirror_index++) {
			struct scrub_block *sblock = sblocks_for_recheck +
						     mirror_index;
			int page_index;

			for (page_index = 0; page_index < sblock->page_count;
			     page_index++) {
				sblock->pagev[page_index]->sblock = NULL;
				scrub_page_put(sblock->pagev[page_index]);
			}
		}
		kfree(sblocks_for_recheck);
	}

	return 0;
}

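/*
 * map the errored logical range in PAGE_SIZE steps and fill one
 * scrub_block per mirror with newly allocated pages, so that every
 * mirror can be re-read and compared page by page
 */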
static int scrub_setup_recheck_block(struct scrub_ctx *sctx,
				     struct btrfs_fs_info *fs_info,
				     u64 length, u64 logical,
				     struct scrub_block *sblocks_for_recheck)
{
	int page_index;
	int mirror_index;
	int ret;

	/*
	 * note: the two members ref_count and outstanding_pages
	 * are not used (and not set) in the blocks that are used for
	 * the recheck procedure
	 */

	page_index = 0;
	while (length > 0) {
		u64 sublen = min_t(u64, length, PAGE_SIZE);
		u64 mapped_length = sublen;
		struct btrfs_bio *bbio = NULL;

		/*
		 * with a length of PAGE_SIZE, each returned stripe
		 * represents one mirror
		 */
		ret = btrfs_map_block(fs_info, WRITE, logical, &mapped_length,
				      &bbio, 0);
		if (ret || !bbio || mapped_length < sublen) {
			kfree(bbio);
			return -EIO;
		}

		BUG_ON(page_index >= SCRUB_PAGES_PER_BIO);
		for (mirror_index = 0; mirror_index < (int)bbio->num_stripes;
		     mirror_index++) {
			struct scrub_block *sblock;
			struct scrub_page *page;

			if (mirror_index >= BTRFS_MAX_MIRRORS)
				continue;

			sblock = sblocks_for_recheck + mirror_index;
			sblock->sctx = sctx;
			page = kzalloc(sizeof(*page), GFP_NOFS);
			if (!page) {
leave_nomem:
				spin_lock(&sctx->stat_lock);
				sctx->stat.malloc_errors++;
				spin_unlock(&sctx->stat_lock);
				kfree(bbio);
				return -ENOMEM;
			}
			scrub_page_get(page);
			sblock->pagev[page_index] = page;
			page->logical = logical;
			page->physical = bbio->stripes[mirror_index].physical;
			/* for missing devices, dev->bdev is NULL */
			page->dev = bbio->stripes[mirror_index].dev;
			page->mirror_num = mirror_index + 1;
			sblock->page_count++;
			page->page = alloc_page(GFP_NOFS);
			if (!page->page)
				goto leave_nomem;
		}
		kfree(bbio);
		length -= sublen;
		logical += sublen;
		page_index++;
	}

	return 0;
}

/*
 * this function will check the on disk data for checksum errors, header
 * errors and read I/O errors. If any I/O errors happen, the exact pages
 * which are errored are marked as being bad. The goal is to enable scrub
 * to take those pages that are not errored from all the mirrors so that
 * the pages that are errored in the just handled mirror can be repaired.
 */
static void scrub_recheck_block(struct btrfs_fs_info *fs_info,
				struct scrub_block *sblock, int is_metadata,
				int have_csum, u8 *csum, u64 generation,
				u16 csum_size)
{
	int page_num;

	sblock->no_io_error_seen = 1;
	sblock->header_error = 0;
	sblock->checksum_error = 0;

	for (page_num = 0; page_num < sblock->page_count; page_num++) {
		struct bio *bio;
		struct scrub_page *page = sblock->pagev[page_num];
		DECLARE_COMPLETION_ONSTACK(complete);

		if (page->dev->bdev == NULL) {
			page->io_error = 1;
			sblock->no_io_error_seen = 0;
			continue;
		}

		WARN_ON(!page->page);
		bio = bio_alloc(GFP_NOFS, 1);
		if (!bio) {
			page->io_error = 1;
			sblock->no_io_error_seen = 0;
			continue;
		}
		bio->bi_bdev = page->dev->bdev;
		bio->bi_sector = page->physical >> 9;
		bio->bi_end_io = scrub_complete_bio_end_io;
		bio->bi_private = &complete;

		bio_add_page(bio, page->page, PAGE_SIZE, 0);
		btrfsic_submit_bio(READ, bio);

		/* this will also unplug the queue */
		wait_for_completion(&complete);

		page->io_error = !test_bit(BIO_UPTODATE, &bio->bi_flags);
		if (!test_bit(BIO_UPTODATE, &bio->bi_flags))
			sblock->no_io_error_seen = 0;
		bio_put(bio);
	}

	if (sblock->no_io_error_seen)
		scrub_recheck_block_checksum(fs_info, sblock, is_metadata,
					     have_csum, csum, generation,
					     csum_size);

	return;
}

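/*
 * recompute the checksum over all pages of a re-read block; for
 * metadata also verify bytenr, fsid, chunk tree uuid and generation
 * from the header. The result is stored in the sblock error flags.
 */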
static void scrub_recheck_block_checksum(struct btrfs_fs_info *fs_info,
					 struct scrub_block *sblock,
					 int is_metadata, int have_csum,
					 const u8 *csum, u64 generation,
					 u16 csum_size)
{
	int page_num;
	u8 calculated_csum[BTRFS_CSUM_SIZE];
	u32 crc = ~(u32)0;
	struct btrfs_root *root = fs_info->extent_root;
	void *mapped_buffer;

	WARN_ON(!sblock->pagev[0]->page);
	if (is_metadata) {
		struct btrfs_header *h;

		mapped_buffer = kmap_atomic(sblock->pagev[0]->page);
		h = (struct btrfs_header *)mapped_buffer;

		if (sblock->pagev[0]->logical != le64_to_cpu(h->bytenr) ||
		    memcmp(h->fsid, fs_info->fsid, BTRFS_UUID_SIZE) ||
		    memcmp(h->chunk_tree_uuid, fs_info->chunk_tree_uuid,
			   BTRFS_UUID_SIZE)) {
			sblock->header_error = 1;
		} else if (generation != le64_to_cpu(h->generation)) {
			sblock->header_error = 1;
			sblock->generation_error = 1;
		}
		csum = h->csum;
	} else {
		if (!have_csum)
			return;

		mapped_buffer = kmap_atomic(sblock->pagev[0]->page);
	}

	for (page_num = 0;;) {
		if (page_num == 0 && is_metadata)
			crc = btrfs_csum_data(root,
				((u8 *)mapped_buffer) + BTRFS_CSUM_SIZE,
				crc, PAGE_SIZE - BTRFS_CSUM_SIZE);
		else
			crc = btrfs_csum_data(root, mapped_buffer, crc,
					      PAGE_SIZE);

		kunmap_atomic(mapped_buffer);
		page_num++;
		if (page_num >= sblock->page_count)
			break;
		WARN_ON(!sblock->pagev[page_num]->page);

		mapped_buffer = kmap_atomic(sblock->pagev[page_num]->page);
	}

	btrfs_csum_final(crc, calculated_csum);
	if (memcmp(calculated_csum, csum, csum_size))
		sblock->checksum_error = 1;
}

static void scrub_complete_bio_end_io(struct bio *bio, int err)
{
	complete((struct completion *)bio->bi_private);
}

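/*
 * rewrite the bad block page by page from the corresponding pages of a
 * good mirror
 */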
static int scrub_repair_block_from_good_copy(struct scrub_block *sblock_bad,
					     struct scrub_block *sblock_good,
					     int force_write)
{
	int page_num;
	int ret = 0;

	for (page_num = 0; page_num < sblock_bad->page_count; page_num++) {
		int ret_sub;

		ret_sub = scrub_repair_page_from_good_copy(sblock_bad,
							   sblock_good,
							   page_num,
							   force_write);
		if (ret_sub)
			ret = ret_sub;
	}

	return ret;
}

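/*
 * synchronously write one page of the good mirror to the physical
 * location of the bad page (if a rewrite is needed) and account a
 * write error on failure
 */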
static int scrub_repair_page_from_good_copy(struct scrub_block *sblock_bad,
					    struct scrub_block *sblock_good,
					    int page_num, int force_write)
{
	struct scrub_page *page_bad = sblock_bad->pagev[page_num];
	struct scrub_page *page_good = sblock_good->pagev[page_num];

	BUG_ON(page_bad->page == NULL);
	BUG_ON(page_good->page == NULL);
	if (force_write || sblock_bad->header_error ||
	    sblock_bad->checksum_error || page_bad->io_error) {
		struct bio *bio;
		int ret;
		DECLARE_COMPLETION_ONSTACK(complete);

		bio = bio_alloc(GFP_NOFS, 1);
		if (!bio)
			return -EIO;
		bio->bi_bdev = page_bad->dev->bdev;
		bio->bi_sector = page_bad->physical >> 9;
		bio->bi_end_io = scrub_complete_bio_end_io;
		bio->bi_private = &complete;

		ret = bio_add_page(bio, page_good->page, PAGE_SIZE, 0);
		if (PAGE_SIZE != ret) {
			bio_put(bio);
			return -EIO;
		}
		btrfsic_submit_bio(WRITE, bio);

		/* this will also unplug the queue */
		wait_for_completion(&complete);
		if (!bio_flagged(bio, BIO_UPTODATE)) {
			btrfs_dev_stat_inc_and_print(page_bad->dev,
				BTRFS_DEV_STAT_WRITE_ERRS);
			bio_put(bio);
			return -EIO;
		}
		bio_put(bio);
	}

	return 0;
}

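/*
 * verify a completed block according to its extent flags (data, tree
 * block or super block) and hand it to scrub_handle_errored_block()
 * if the check fails
 */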
static void scrub_checksum(struct scrub_block *sblock)
{
	u64 flags;
	int ret;

	WARN_ON(sblock->page_count < 1);
	flags = sblock->pagev[0]->flags;
	ret = 0;
	if (flags & BTRFS_EXTENT_FLAG_DATA)
		ret = scrub_checksum_data(sblock);
	else if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK)
		ret = scrub_checksum_tree_block(sblock);
	else if (flags & BTRFS_EXTENT_FLAG_SUPER)
		(void)scrub_checksum_super(sblock);
	else
		WARN_ON(1);
	if (ret)
		scrub_handle_errored_block(sblock);
}

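/*
 * compute the data checksum of one sector and compare it against the
 * checksum from the csum tree; returns 1 on mismatch
 */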
1297static int scrub_checksum_data(struct scrub_block *sblock)
1298{
Stefan Behrensd9d181c2012-11-02 09:58:09 +01001299 struct scrub_ctx *sctx = sblock->sctx;
Arne Jansena2de7332011-03-08 14:14:00 +01001300 u8 csum[BTRFS_CSUM_SIZE];
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001301 u8 *on_disk_csum;
1302 struct page *page;
1303 void *buffer;
Arne Jansena2de7332011-03-08 14:14:00 +01001304 u32 crc = ~(u32)0;
1305 int fail = 0;
Stefan Behrensa36cf8b2012-11-02 13:26:57 +01001306 struct btrfs_root *root = sctx->dev_root;
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001307 u64 len;
1308 int index;
Arne Jansena2de7332011-03-08 14:14:00 +01001309
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001310 BUG_ON(sblock->page_count < 1);
Stefan Behrens7a9e9982012-11-02 14:58:04 +01001311 if (!sblock->pagev[0]->have_csum)
Arne Jansena2de7332011-03-08 14:14:00 +01001312 return 0;
1313
Stefan Behrens7a9e9982012-11-02 14:58:04 +01001314 on_disk_csum = sblock->pagev[0]->csum;
1315 page = sblock->pagev[0]->page;
Linus Torvalds9613beb2012-03-30 12:44:29 -07001316 buffer = kmap_atomic(page);
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001317
Stefan Behrensd9d181c2012-11-02 09:58:09 +01001318 len = sctx->sectorsize;
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001319 index = 0;
1320 for (;;) {
1321 u64 l = min_t(u64, len, PAGE_SIZE);
1322
1323 crc = btrfs_csum_data(root, buffer, crc, l);
Linus Torvalds9613beb2012-03-30 12:44:29 -07001324 kunmap_atomic(buffer);
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001325 len -= l;
1326 if (len == 0)
1327 break;
1328 index++;
1329 BUG_ON(index >= sblock->page_count);
Stefan Behrens7a9e9982012-11-02 14:58:04 +01001330 BUG_ON(!sblock->pagev[index]->page);
1331 page = sblock->pagev[index]->page;
Linus Torvalds9613beb2012-03-30 12:44:29 -07001332 buffer = kmap_atomic(page);
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001333 }
1334
Arne Jansena2de7332011-03-08 14:14:00 +01001335 btrfs_csum_final(crc, csum);
Stefan Behrensd9d181c2012-11-02 09:58:09 +01001336 if (memcmp(csum, on_disk_csum, sctx->csum_size))
Arne Jansena2de7332011-03-08 14:14:00 +01001337 fail = 1;
1338
Arne Jansena2de7332011-03-08 14:14:00 +01001339 return fail;
1340}
1341
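/*
 * verify a tree block: compare bytenr, generation, fsid and chunk tree
 * uuid from the header and recompute the checksum over all pages;
 * returns non-zero on any mismatch
 */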
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001342static int scrub_checksum_tree_block(struct scrub_block *sblock)
Arne Jansena2de7332011-03-08 14:14:00 +01001343{
Stefan Behrensd9d181c2012-11-02 09:58:09 +01001344 struct scrub_ctx *sctx = sblock->sctx;
Arne Jansena2de7332011-03-08 14:14:00 +01001345 struct btrfs_header *h;
Stefan Behrensa36cf8b2012-11-02 13:26:57 +01001346 struct btrfs_root *root = sctx->dev_root;
Arne Jansena2de7332011-03-08 14:14:00 +01001347 struct btrfs_fs_info *fs_info = root->fs_info;
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001348 u8 calculated_csum[BTRFS_CSUM_SIZE];
1349 u8 on_disk_csum[BTRFS_CSUM_SIZE];
1350 struct page *page;
1351 void *mapped_buffer;
1352 u64 mapped_size;
1353 void *p;
Arne Jansena2de7332011-03-08 14:14:00 +01001354 u32 crc = ~(u32)0;
1355 int fail = 0;
1356 int crc_fail = 0;
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001357 u64 len;
1358 int index;
1359
1360 BUG_ON(sblock->page_count < 1);
Stefan Behrens7a9e9982012-11-02 14:58:04 +01001361 page = sblock->pagev[0]->page;
Linus Torvalds9613beb2012-03-30 12:44:29 -07001362 mapped_buffer = kmap_atomic(page);
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001363 h = (struct btrfs_header *)mapped_buffer;
Stefan Behrensd9d181c2012-11-02 09:58:09 +01001364 memcpy(on_disk_csum, h->csum, sctx->csum_size);
Arne Jansena2de7332011-03-08 14:14:00 +01001365
1366 /*
1367 * we don't use the getter functions here, as we
1368 * a) don't have an extent buffer and
1369 * b) the page is already kmapped
1370 */
Arne Jansena2de7332011-03-08 14:14:00 +01001371
Stefan Behrens7a9e9982012-11-02 14:58:04 +01001372 if (sblock->pagev[0]->logical != le64_to_cpu(h->bytenr))
Arne Jansena2de7332011-03-08 14:14:00 +01001373 ++fail;
1374
Stefan Behrens7a9e9982012-11-02 14:58:04 +01001375 if (sblock->pagev[0]->generation != le64_to_cpu(h->generation))
Arne Jansena2de7332011-03-08 14:14:00 +01001376 ++fail;
1377
1378 if (memcmp(h->fsid, fs_info->fsid, BTRFS_UUID_SIZE))
1379 ++fail;
1380
1381 if (memcmp(h->chunk_tree_uuid, fs_info->chunk_tree_uuid,
1382 BTRFS_UUID_SIZE))
1383 ++fail;
1384
Stefan Behrensd9d181c2012-11-02 09:58:09 +01001385 BUG_ON(sctx->nodesize != sctx->leafsize);
1386 len = sctx->nodesize - BTRFS_CSUM_SIZE;
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001387 mapped_size = PAGE_SIZE - BTRFS_CSUM_SIZE;
1388 p = ((u8 *)mapped_buffer) + BTRFS_CSUM_SIZE;
1389 index = 0;
1390 for (;;) {
1391 u64 l = min_t(u64, len, mapped_size);
1392
1393 crc = btrfs_csum_data(root, p, crc, l);
Linus Torvalds9613beb2012-03-30 12:44:29 -07001394 kunmap_atomic(mapped_buffer);
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001395 len -= l;
1396 if (len == 0)
1397 break;
1398 index++;
1399 BUG_ON(index >= sblock->page_count);
Stefan Behrens7a9e9982012-11-02 14:58:04 +01001400 BUG_ON(!sblock->pagev[index]->page);
1401 page = sblock->pagev[index]->page;
Linus Torvalds9613beb2012-03-30 12:44:29 -07001402 mapped_buffer = kmap_atomic(page);
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001403 mapped_size = PAGE_SIZE;
1404 p = mapped_buffer;
1405 }
1406
1407 btrfs_csum_final(crc, calculated_csum);
Stefan Behrensd9d181c2012-11-02 09:58:09 +01001408 if (memcmp(calculated_csum, on_disk_csum, sctx->csum_size))
Arne Jansena2de7332011-03-08 14:14:00 +01001409 ++crc_fail;
1410
Arne Jansena2de7332011-03-08 14:14:00 +01001411 return fail || crc_fail;
1412}
1413
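/*
 * verify one superblock copy; mismatches are only counted in the scrub
 * stats and the per-device error counters, nothing is rewritten here
 */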
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001414static int scrub_checksum_super(struct scrub_block *sblock)
Arne Jansena2de7332011-03-08 14:14:00 +01001415{
1416 struct btrfs_super_block *s;
Stefan Behrensd9d181c2012-11-02 09:58:09 +01001417 struct scrub_ctx *sctx = sblock->sctx;
Stefan Behrensa36cf8b2012-11-02 13:26:57 +01001418 struct btrfs_root *root = sctx->dev_root;
Arne Jansena2de7332011-03-08 14:14:00 +01001419 struct btrfs_fs_info *fs_info = root->fs_info;
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001420 u8 calculated_csum[BTRFS_CSUM_SIZE];
1421 u8 on_disk_csum[BTRFS_CSUM_SIZE];
1422 struct page *page;
1423 void *mapped_buffer;
1424 u64 mapped_size;
1425 void *p;
Arne Jansena2de7332011-03-08 14:14:00 +01001426 u32 crc = ~(u32)0;
Stefan Behrens442a4f62012-05-25 16:06:08 +02001427 int fail_gen = 0;
1428 int fail_cor = 0;
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001429 u64 len;
1430 int index;
Arne Jansena2de7332011-03-08 14:14:00 +01001431
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001432 BUG_ON(sblock->page_count < 1);
Stefan Behrens7a9e9982012-11-02 14:58:04 +01001433 page = sblock->pagev[0]->page;
Linus Torvalds9613beb2012-03-30 12:44:29 -07001434 mapped_buffer = kmap_atomic(page);
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001435 s = (struct btrfs_super_block *)mapped_buffer;
Stefan Behrensd9d181c2012-11-02 09:58:09 +01001436 memcpy(on_disk_csum, s->csum, sctx->csum_size);
Arne Jansena2de7332011-03-08 14:14:00 +01001437
Stefan Behrens7a9e9982012-11-02 14:58:04 +01001438 if (sblock->pagev[0]->logical != le64_to_cpu(s->bytenr))
Stefan Behrens442a4f62012-05-25 16:06:08 +02001439 ++fail_cor;
Arne Jansena2de7332011-03-08 14:14:00 +01001440
Stefan Behrens7a9e9982012-11-02 14:58:04 +01001441 if (sblock->pagev[0]->generation != le64_to_cpu(s->generation))
Stefan Behrens442a4f62012-05-25 16:06:08 +02001442 ++fail_gen;
Arne Jansena2de7332011-03-08 14:14:00 +01001443
1444 if (memcmp(s->fsid, fs_info->fsid, BTRFS_UUID_SIZE))
Stefan Behrens442a4f62012-05-25 16:06:08 +02001445 ++fail_cor;
Arne Jansena2de7332011-03-08 14:14:00 +01001446
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001447 len = BTRFS_SUPER_INFO_SIZE - BTRFS_CSUM_SIZE;
1448 mapped_size = PAGE_SIZE - BTRFS_CSUM_SIZE;
1449 p = ((u8 *)mapped_buffer) + BTRFS_CSUM_SIZE;
1450 index = 0;
1451 for (;;) {
1452 u64 l = min_t(u64, len, mapped_size);
1453
1454 crc = btrfs_csum_data(root, p, crc, l);
Linus Torvalds9613beb2012-03-30 12:44:29 -07001455 kunmap_atomic(mapped_buffer);
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001456 len -= l;
1457 if (len == 0)
1458 break;
1459 index++;
1460 BUG_ON(index >= sblock->page_count);
Stefan Behrens7a9e9982012-11-02 14:58:04 +01001461 BUG_ON(!sblock->pagev[index]->page);
1462 page = sblock->pagev[index]->page;
Linus Torvalds9613beb2012-03-30 12:44:29 -07001463 mapped_buffer = kmap_atomic(page);
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001464 mapped_size = PAGE_SIZE;
1465 p = mapped_buffer;
1466 }
1467
1468 btrfs_csum_final(crc, calculated_csum);
Stefan Behrensd9d181c2012-11-02 09:58:09 +01001469 if (memcmp(calculated_csum, on_disk_csum, sctx->csum_size))
Stefan Behrens442a4f62012-05-25 16:06:08 +02001470 ++fail_cor;
Arne Jansena2de7332011-03-08 14:14:00 +01001471
Stefan Behrens442a4f62012-05-25 16:06:08 +02001472 if (fail_cor + fail_gen) {
Arne Jansena2de7332011-03-08 14:14:00 +01001473 /*
1474		 * if we find an error in a super block, we just report it;
1475		 * the super blocks get rewritten with the next transaction
1476		 * commit anyway
1477 */
Stefan Behrensd9d181c2012-11-02 09:58:09 +01001478 spin_lock(&sctx->stat_lock);
1479 ++sctx->stat.super_errors;
1480 spin_unlock(&sctx->stat_lock);
Stefan Behrens442a4f62012-05-25 16:06:08 +02001481 if (fail_cor)
Stefan Behrens7a9e9982012-11-02 14:58:04 +01001482 btrfs_dev_stat_inc_and_print(sblock->pagev[0]->dev,
Stefan Behrens442a4f62012-05-25 16:06:08 +02001483 BTRFS_DEV_STAT_CORRUPTION_ERRS);
1484 else
Stefan Behrens7a9e9982012-11-02 14:58:04 +01001485 btrfs_dev_stat_inc_and_print(sblock->pagev[0]->dev,
Stefan Behrens442a4f62012-05-25 16:06:08 +02001486 BTRFS_DEV_STAT_GENERATION_ERRS);
Arne Jansena2de7332011-03-08 14:14:00 +01001487 }
1488
Stefan Behrens442a4f62012-05-25 16:06:08 +02001489 return fail_cor + fail_gen;
Arne Jansena2de7332011-03-08 14:14:00 +01001490}
1491
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001492static void scrub_block_get(struct scrub_block *sblock)
1493{
1494 atomic_inc(&sblock->ref_count);
1495}
1496
1497static void scrub_block_put(struct scrub_block *sblock)
1498{
1499 if (atomic_dec_and_test(&sblock->ref_count)) {
1500 int i;
1501
1502 for (i = 0; i < sblock->page_count; i++)
Stefan Behrens7a9e9982012-11-02 14:58:04 +01001503 scrub_page_put(sblock->pagev[i]);
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001504 kfree(sblock);
1505 }
1506}
1507
Stefan Behrens7a9e9982012-11-02 14:58:04 +01001508static void scrub_page_get(struct scrub_page *spage)
1509{
1510 atomic_inc(&spage->ref_count);
1511}
1512
1513static void scrub_page_put(struct scrub_page *spage)
1514{
1515 if (atomic_dec_and_test(&spage->ref_count)) {
1516 if (spage->page)
1517 __free_page(spage->page);
1518 kfree(spage);
1519 }
1520}
1521
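/* submit the bio that is currently being filled, if there is one */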
Stefan Behrensd9d181c2012-11-02 09:58:09 +01001522static void scrub_submit(struct scrub_ctx *sctx)
Arne Jansena2de7332011-03-08 14:14:00 +01001523{
1524 struct scrub_bio *sbio;
1525
Stefan Behrensd9d181c2012-11-02 09:58:09 +01001526 if (sctx->curr == -1)
Stefan Behrens1623ede2012-03-27 14:21:26 -04001527 return;
Arne Jansena2de7332011-03-08 14:14:00 +01001528
Stefan Behrensd9d181c2012-11-02 09:58:09 +01001529 sbio = sctx->bios[sctx->curr];
1530 sctx->curr = -1;
Stefan Behrensb6bfebc2012-11-02 16:44:58 +01001531 scrub_pending_bio_inc(sctx);
Arne Jansena2de7332011-03-08 14:14:00 +01001532
Stefan Behrens21adbd52011-11-09 13:44:05 +01001533 btrfsic_submit_bio(READ, sbio->bio);
Arne Jansena2de7332011-03-08 14:14:00 +01001534}
1535
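/*
 * add a page to the bio currently being assembled; when the page is
 * not physically/logically contiguous with it or belongs to another
 * device, the current bio is submitted first and a new one is started
 */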
Stefan Behrensd9d181c2012-11-02 09:58:09 +01001536static int scrub_add_page_to_bio(struct scrub_ctx *sctx,
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001537 struct scrub_page *spage)
Arne Jansena2de7332011-03-08 14:14:00 +01001538{
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001539 struct scrub_block *sblock = spage->sblock;
Arne Jansena2de7332011-03-08 14:14:00 +01001540 struct scrub_bio *sbio;
Arne Jansen69f4cb52011-11-11 08:17:10 -05001541 int ret;
Arne Jansena2de7332011-03-08 14:14:00 +01001542
1543again:
1544 /*
1545 * grab a fresh bio or wait for one to become available
1546 */
Stefan Behrensd9d181c2012-11-02 09:58:09 +01001547 while (sctx->curr == -1) {
1548 spin_lock(&sctx->list_lock);
1549 sctx->curr = sctx->first_free;
1550 if (sctx->curr != -1) {
1551 sctx->first_free = sctx->bios[sctx->curr]->next_free;
1552 sctx->bios[sctx->curr]->next_free = -1;
1553 sctx->bios[sctx->curr]->page_count = 0;
1554 spin_unlock(&sctx->list_lock);
Arne Jansena2de7332011-03-08 14:14:00 +01001555 } else {
Stefan Behrensd9d181c2012-11-02 09:58:09 +01001556 spin_unlock(&sctx->list_lock);
1557 wait_event(sctx->list_wait, sctx->first_free != -1);
Arne Jansena2de7332011-03-08 14:14:00 +01001558 }
1559 }
Stefan Behrensd9d181c2012-11-02 09:58:09 +01001560 sbio = sctx->bios[sctx->curr];
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001561 if (sbio->page_count == 0) {
Arne Jansen69f4cb52011-11-11 08:17:10 -05001562 struct bio *bio;
1563
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001564 sbio->physical = spage->physical;
1565 sbio->logical = spage->logical;
Stefan Behrensa36cf8b2012-11-02 13:26:57 +01001566 sbio->dev = spage->dev;
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001567 bio = sbio->bio;
1568 if (!bio) {
Stefan Behrensd9d181c2012-11-02 09:58:09 +01001569 bio = bio_alloc(GFP_NOFS, sctx->pages_per_bio);
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001570 if (!bio)
1571 return -ENOMEM;
1572 sbio->bio = bio;
1573 }
Arne Jansen69f4cb52011-11-11 08:17:10 -05001574
1575 bio->bi_private = sbio;
1576 bio->bi_end_io = scrub_bio_end_io;
Stefan Behrensa36cf8b2012-11-02 13:26:57 +01001577 bio->bi_bdev = sbio->dev->bdev;
1578 bio->bi_sector = sbio->physical >> 9;
Arne Jansen69f4cb52011-11-11 08:17:10 -05001579 sbio->err = 0;
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001580 } else if (sbio->physical + sbio->page_count * PAGE_SIZE !=
1581 spage->physical ||
1582 sbio->logical + sbio->page_count * PAGE_SIZE !=
Stefan Behrensa36cf8b2012-11-02 13:26:57 +01001583 spage->logical ||
1584 sbio->dev != spage->dev) {
Stefan Behrensd9d181c2012-11-02 09:58:09 +01001585 scrub_submit(sctx);
Arne Jansen69f4cb52011-11-11 08:17:10 -05001586 goto again;
1587 }
1588
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001589 sbio->pagev[sbio->page_count] = spage;
1590 ret = bio_add_page(sbio->bio, spage->page, PAGE_SIZE, 0);
1591 if (ret != PAGE_SIZE) {
1592 if (sbio->page_count < 1) {
1593 bio_put(sbio->bio);
1594 sbio->bio = NULL;
1595 return -EIO;
1596 }
Stefan Behrensd9d181c2012-11-02 09:58:09 +01001597 scrub_submit(sctx);
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001598 goto again;
Arne Jansena2de7332011-03-08 14:14:00 +01001599 }
Arne Jansen1bc87792011-05-28 21:57:55 +02001600
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001601 scrub_block_get(sblock); /* one for the added page */
1602 atomic_inc(&sblock->outstanding_pages);
1603 sbio->page_count++;
Stefan Behrensd9d181c2012-11-02 09:58:09 +01001604 if (sbio->page_count == sctx->pages_per_bio)
1605 scrub_submit(sctx);
Arne Jansena2de7332011-03-08 14:14:00 +01001606
1607 return 0;
1608}
1609
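/*
 * create a scrub_block for the range [logical, logical + len), split
 * it into PAGE_SIZE sized scrub_pages and queue them for reading;
 * with force set, the assembled bio is submitted immediately
 */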
Stefan Behrensd9d181c2012-11-02 09:58:09 +01001610static int scrub_pages(struct scrub_ctx *sctx, u64 logical, u64 len,
Stefan Behrensa36cf8b2012-11-02 13:26:57 +01001611 u64 physical, struct btrfs_device *dev, u64 flags,
1612 u64 gen, int mirror_num, u8 *csum, int force)
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001613{
1614 struct scrub_block *sblock;
1615 int index;
1616
1617 sblock = kzalloc(sizeof(*sblock), GFP_NOFS);
1618 if (!sblock) {
Stefan Behrensd9d181c2012-11-02 09:58:09 +01001619 spin_lock(&sctx->stat_lock);
1620 sctx->stat.malloc_errors++;
1621 spin_unlock(&sctx->stat_lock);
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001622 return -ENOMEM;
1623 }
1624
Stefan Behrens7a9e9982012-11-02 14:58:04 +01001625 /* one ref inside this function, plus one for each page added to
1626 * a bio later on */
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001627 atomic_set(&sblock->ref_count, 1);
Stefan Behrensd9d181c2012-11-02 09:58:09 +01001628 sblock->sctx = sctx;
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001629 sblock->no_io_error_seen = 1;
1630
1631 for (index = 0; len > 0; index++) {
Stefan Behrens7a9e9982012-11-02 14:58:04 +01001632 struct scrub_page *spage;
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001633 u64 l = min_t(u64, len, PAGE_SIZE);
1634
Stefan Behrens7a9e9982012-11-02 14:58:04 +01001635 spage = kzalloc(sizeof(*spage), GFP_NOFS);
1636 if (!spage) {
1637leave_nomem:
Stefan Behrensd9d181c2012-11-02 09:58:09 +01001638 spin_lock(&sctx->stat_lock);
1639 sctx->stat.malloc_errors++;
1640 spin_unlock(&sctx->stat_lock);
Stefan Behrens7a9e9982012-11-02 14:58:04 +01001641 scrub_block_put(sblock);
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001642 return -ENOMEM;
1643 }
Stefan Behrens7a9e9982012-11-02 14:58:04 +01001644 BUG_ON(index >= SCRUB_MAX_PAGES_PER_BLOCK);
1645 scrub_page_get(spage);
1646 sblock->pagev[index] = spage;
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001647 spage->sblock = sblock;
Stefan Behrensa36cf8b2012-11-02 13:26:57 +01001648 spage->dev = dev;
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001649 spage->flags = flags;
1650 spage->generation = gen;
1651 spage->logical = logical;
1652 spage->physical = physical;
1653 spage->mirror_num = mirror_num;
1654 if (csum) {
1655 spage->have_csum = 1;
Stefan Behrensd9d181c2012-11-02 09:58:09 +01001656 memcpy(spage->csum, csum, sctx->csum_size);
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001657 } else {
1658 spage->have_csum = 0;
1659 }
1660 sblock->page_count++;
Stefan Behrens7a9e9982012-11-02 14:58:04 +01001661 spage->page = alloc_page(GFP_NOFS);
1662 if (!spage->page)
1663 goto leave_nomem;
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001664 len -= l;
1665 logical += l;
1666 physical += l;
1667 }
1668
Stefan Behrens7a9e9982012-11-02 14:58:04 +01001669 WARN_ON(sblock->page_count == 0);
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001670 for (index = 0; index < sblock->page_count; index++) {
Stefan Behrens7a9e9982012-11-02 14:58:04 +01001671 struct scrub_page *spage = sblock->pagev[index];
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001672 int ret;
1673
Stefan Behrensd9d181c2012-11-02 09:58:09 +01001674 ret = scrub_add_page_to_bio(sctx, spage);
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001675 if (ret) {
1676 scrub_block_put(sblock);
1677 return ret;
1678 }
1679 }
1680
1681 if (force)
Stefan Behrensd9d181c2012-11-02 09:58:09 +01001682 scrub_submit(sctx);
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001683
1684 /* last one frees, either here or in bio completion for last page */
1685 scrub_block_put(sblock);
1686 return 0;
1687}
1688
1689static void scrub_bio_end_io(struct bio *bio, int err)
1690{
1691 struct scrub_bio *sbio = bio->bi_private;
Stefan Behrensa36cf8b2012-11-02 13:26:57 +01001692 struct btrfs_fs_info *fs_info = sbio->dev->dev_root->fs_info;
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001693
1694 sbio->err = err;
1695 sbio->bio = bio;
1696
1697 btrfs_queue_worker(&fs_info->scrub_workers, &sbio->work);
1698}
1699
1700static void scrub_bio_end_io_worker(struct btrfs_work *work)
1701{
1702 struct scrub_bio *sbio = container_of(work, struct scrub_bio, work);
Stefan Behrensd9d181c2012-11-02 09:58:09 +01001703 struct scrub_ctx *sctx = sbio->sctx;
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001704 int i;
1705
1706 BUG_ON(sbio->page_count > SCRUB_PAGES_PER_BIO);
1707 if (sbio->err) {
1708 for (i = 0; i < sbio->page_count; i++) {
1709 struct scrub_page *spage = sbio->pagev[i];
1710
1711 spage->io_error = 1;
1712 spage->sblock->no_io_error_seen = 0;
1713 }
1714 }
1715
1716 /* now complete the scrub_block items that have all pages completed */
1717 for (i = 0; i < sbio->page_count; i++) {
1718 struct scrub_page *spage = sbio->pagev[i];
1719 struct scrub_block *sblock = spage->sblock;
1720
1721 if (atomic_dec_and_test(&sblock->outstanding_pages))
1722 scrub_block_complete(sblock);
1723 scrub_block_put(sblock);
1724 }
1725
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001726 bio_put(sbio->bio);
1727 sbio->bio = NULL;
Stefan Behrensd9d181c2012-11-02 09:58:09 +01001728 spin_lock(&sctx->list_lock);
1729 sbio->next_free = sctx->first_free;
1730 sctx->first_free = sbio->index;
1731 spin_unlock(&sctx->list_lock);
Stefan Behrensb6bfebc2012-11-02 16:44:58 +01001732 scrub_pending_bio_dec(sctx);
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001733}
1734
1735static void scrub_block_complete(struct scrub_block *sblock)
1736{
1737 if (!sblock->no_io_error_seen)
1738 scrub_handle_errored_block(sblock);
1739 else
1740 scrub_checksum(sblock);
1741}
1742
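/*
 * look up the data checksum for the sector at 'logical' in the
 * csum_list that was filled by btrfs_lookup_csums_range(); returns 1
 * and copies the csum when found, 0 otherwise
 */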
Stefan Behrensd9d181c2012-11-02 09:58:09 +01001743static int scrub_find_csum(struct scrub_ctx *sctx, u64 logical, u64 len,
Arne Jansena2de7332011-03-08 14:14:00 +01001744 u8 *csum)
1745{
1746 struct btrfs_ordered_sum *sum = NULL;
1747 int ret = 0;
1748 unsigned long i;
1749 unsigned long num_sectors;
Arne Jansena2de7332011-03-08 14:14:00 +01001750
Stefan Behrensd9d181c2012-11-02 09:58:09 +01001751 while (!list_empty(&sctx->csum_list)) {
1752 sum = list_first_entry(&sctx->csum_list,
Arne Jansena2de7332011-03-08 14:14:00 +01001753 struct btrfs_ordered_sum, list);
1754 if (sum->bytenr > logical)
1755 return 0;
1756 if (sum->bytenr + sum->len > logical)
1757 break;
1758
Stefan Behrensd9d181c2012-11-02 09:58:09 +01001759 ++sctx->stat.csum_discards;
Arne Jansena2de7332011-03-08 14:14:00 +01001760 list_del(&sum->list);
1761 kfree(sum);
1762 sum = NULL;
1763 }
1764 if (!sum)
1765 return 0;
1766
Stefan Behrensd9d181c2012-11-02 09:58:09 +01001767 num_sectors = sum->len / sctx->sectorsize;
Arne Jansena2de7332011-03-08 14:14:00 +01001768 for (i = 0; i < num_sectors; ++i) {
1769 if (sum->sums[i].bytenr == logical) {
Stefan Behrensd9d181c2012-11-02 09:58:09 +01001770 memcpy(csum, &sum->sums[i].sum, sctx->csum_size);
Arne Jansena2de7332011-03-08 14:14:00 +01001771 ret = 1;
1772 break;
1773 }
1774 }
1775 if (ret && i == num_sectors - 1) {
1776 list_del(&sum->list);
1777 kfree(sum);
1778 }
1779 return ret;
1780}
1781
1782/* scrub extent tries to collect up to 64 kB for each bio */
Stefan Behrensd9d181c2012-11-02 09:58:09 +01001783static int scrub_extent(struct scrub_ctx *sctx, u64 logical, u64 len,
Stefan Behrensa36cf8b2012-11-02 13:26:57 +01001784 u64 physical, struct btrfs_device *dev, u64 flags,
1785 u64 gen, int mirror_num)
Arne Jansena2de7332011-03-08 14:14:00 +01001786{
1787 int ret;
1788 u8 csum[BTRFS_CSUM_SIZE];
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001789 u32 blocksize;
1790
1791 if (flags & BTRFS_EXTENT_FLAG_DATA) {
Stefan Behrensd9d181c2012-11-02 09:58:09 +01001792 blocksize = sctx->sectorsize;
1793 spin_lock(&sctx->stat_lock);
1794 sctx->stat.data_extents_scrubbed++;
1795 sctx->stat.data_bytes_scrubbed += len;
1796 spin_unlock(&sctx->stat_lock);
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001797 } else if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
Stefan Behrensd9d181c2012-11-02 09:58:09 +01001798 BUG_ON(sctx->nodesize != sctx->leafsize);
1799 blocksize = sctx->nodesize;
1800 spin_lock(&sctx->stat_lock);
1801 sctx->stat.tree_extents_scrubbed++;
1802 sctx->stat.tree_bytes_scrubbed += len;
1803 spin_unlock(&sctx->stat_lock);
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001804 } else {
Stefan Behrensd9d181c2012-11-02 09:58:09 +01001805 blocksize = sctx->sectorsize;
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001806 BUG_ON(1);
1807 }
Arne Jansena2de7332011-03-08 14:14:00 +01001808
1809 while (len) {
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001810 u64 l = min_t(u64, len, blocksize);
Arne Jansena2de7332011-03-08 14:14:00 +01001811 int have_csum = 0;
1812
1813 if (flags & BTRFS_EXTENT_FLAG_DATA) {
1814 /* push csums to sbio */
Stefan Behrensd9d181c2012-11-02 09:58:09 +01001815 have_csum = scrub_find_csum(sctx, logical, l, csum);
Arne Jansena2de7332011-03-08 14:14:00 +01001816 if (have_csum == 0)
Stefan Behrensd9d181c2012-11-02 09:58:09 +01001817 ++sctx->stat.no_csum;
Arne Jansena2de7332011-03-08 14:14:00 +01001818 }
Stefan Behrensa36cf8b2012-11-02 13:26:57 +01001819 ret = scrub_pages(sctx, logical, l, physical, dev, flags, gen,
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001820 mirror_num, have_csum ? csum : NULL, 0);
Arne Jansena2de7332011-03-08 14:14:00 +01001821 if (ret)
1822 return ret;
1823 len -= l;
1824 logical += l;
1825 physical += l;
1826 }
1827 return 0;
1828}
1829
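/*
 * scrub the part of a chunk that is stored in stripe 'num' of the
 * given device: trigger readahead for the extent and csum trees, then
 * walk all extent items of each stripe_len portion and scrub them
 */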
Stefan Behrensd9d181c2012-11-02 09:58:09 +01001830static noinline_for_stack int scrub_stripe(struct scrub_ctx *sctx,
Stefan Behrensa36cf8b2012-11-02 13:26:57 +01001831 struct map_lookup *map,
1832 struct btrfs_device *scrub_dev,
1833 int num, u64 base, u64 length)
Arne Jansena2de7332011-03-08 14:14:00 +01001834{
1835 struct btrfs_path *path;
Stefan Behrensa36cf8b2012-11-02 13:26:57 +01001836 struct btrfs_fs_info *fs_info = sctx->dev_root->fs_info;
Arne Jansena2de7332011-03-08 14:14:00 +01001837 struct btrfs_root *root = fs_info->extent_root;
1838 struct btrfs_root *csum_root = fs_info->csum_root;
1839 struct btrfs_extent_item *extent;
Arne Jansene7786c32011-05-28 20:58:38 +00001840 struct blk_plug plug;
Arne Jansena2de7332011-03-08 14:14:00 +01001841 u64 flags;
1842 int ret;
1843 int slot;
1844 int i;
1845 u64 nstripes;
Arne Jansena2de7332011-03-08 14:14:00 +01001846 struct extent_buffer *l;
1847 struct btrfs_key key;
1848 u64 physical;
1849 u64 logical;
1850 u64 generation;
Jan Schmidte12fa9c2011-06-17 15:55:21 +02001851 int mirror_num;
Arne Jansen7a262852011-06-10 12:39:23 +02001852 struct reada_control *reada1;
1853 struct reada_control *reada2;
1854 struct btrfs_key key_start;
1855 struct btrfs_key key_end;
Arne Jansena2de7332011-03-08 14:14:00 +01001856 u64 increment = map->stripe_len;
1857 u64 offset;
1858
1859 nstripes = length;
1860 offset = 0;
1861 do_div(nstripes, map->stripe_len);
1862 if (map->type & BTRFS_BLOCK_GROUP_RAID0) {
1863 offset = map->stripe_len * num;
1864 increment = map->stripe_len * map->num_stripes;
Jan Schmidt193ea742011-06-13 19:56:54 +02001865 mirror_num = 1;
Arne Jansena2de7332011-03-08 14:14:00 +01001866 } else if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
1867 int factor = map->num_stripes / map->sub_stripes;
1868 offset = map->stripe_len * (num / map->sub_stripes);
1869 increment = map->stripe_len * factor;
Jan Schmidt193ea742011-06-13 19:56:54 +02001870 mirror_num = num % map->sub_stripes + 1;
Arne Jansena2de7332011-03-08 14:14:00 +01001871 } else if (map->type & BTRFS_BLOCK_GROUP_RAID1) {
1872 increment = map->stripe_len;
Jan Schmidt193ea742011-06-13 19:56:54 +02001873 mirror_num = num % map->num_stripes + 1;
Arne Jansena2de7332011-03-08 14:14:00 +01001874 } else if (map->type & BTRFS_BLOCK_GROUP_DUP) {
1875 increment = map->stripe_len;
Jan Schmidt193ea742011-06-13 19:56:54 +02001876 mirror_num = num % map->num_stripes + 1;
Arne Jansena2de7332011-03-08 14:14:00 +01001877 } else {
1878 increment = map->stripe_len;
Jan Schmidt193ea742011-06-13 19:56:54 +02001879 mirror_num = 1;
Arne Jansena2de7332011-03-08 14:14:00 +01001880 }
1881
1882 path = btrfs_alloc_path();
1883 if (!path)
1884 return -ENOMEM;
1885
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001886 /*
1887 * work on commit root. The related disk blocks are static as
1888	 * long as COW is applied. This means it is safe to rewrite
1889 * them to repair disk errors without any race conditions
1890 */
Arne Jansena2de7332011-03-08 14:14:00 +01001891 path->search_commit_root = 1;
1892 path->skip_locking = 1;
1893
1894 /*
Arne Jansen7a262852011-06-10 12:39:23 +02001895	 * trigger the readahead for extent tree and csum tree and wait for
1896 * completion. During readahead, the scrub is officially paused
1897 * to not hold off transaction commits
Arne Jansena2de7332011-03-08 14:14:00 +01001898 */
1899 logical = base + offset;
Arne Jansena2de7332011-03-08 14:14:00 +01001900
Stefan Behrensd9d181c2012-11-02 09:58:09 +01001901 wait_event(sctx->list_wait,
Stefan Behrensb6bfebc2012-11-02 16:44:58 +01001902 atomic_read(&sctx->bios_in_flight) == 0);
Arne Jansen7a262852011-06-10 12:39:23 +02001903 atomic_inc(&fs_info->scrubs_paused);
1904 wake_up(&fs_info->scrub_pause_wait);
Arne Jansena2de7332011-03-08 14:14:00 +01001905
Arne Jansen7a262852011-06-10 12:39:23 +02001906 /* FIXME it might be better to start readahead at commit root */
1907 key_start.objectid = logical;
1908 key_start.type = BTRFS_EXTENT_ITEM_KEY;
1909 key_start.offset = (u64)0;
1910 key_end.objectid = base + offset + nstripes * increment;
1911 key_end.type = BTRFS_EXTENT_ITEM_KEY;
1912 key_end.offset = (u64)0;
1913 reada1 = btrfs_reada_add(root, &key_start, &key_end);
Arne Jansena2de7332011-03-08 14:14:00 +01001914
Arne Jansen7a262852011-06-10 12:39:23 +02001915 key_start.objectid = BTRFS_EXTENT_CSUM_OBJECTID;
1916 key_start.type = BTRFS_EXTENT_CSUM_KEY;
1917 key_start.offset = logical;
1918 key_end.objectid = BTRFS_EXTENT_CSUM_OBJECTID;
1919 key_end.type = BTRFS_EXTENT_CSUM_KEY;
1920 key_end.offset = base + offset + nstripes * increment;
1921 reada2 = btrfs_reada_add(csum_root, &key_start, &key_end);
Arne Jansena2de7332011-03-08 14:14:00 +01001922
Arne Jansen7a262852011-06-10 12:39:23 +02001923 if (!IS_ERR(reada1))
1924 btrfs_reada_wait(reada1);
1925 if (!IS_ERR(reada2))
1926 btrfs_reada_wait(reada2);
Arne Jansena2de7332011-03-08 14:14:00 +01001927
Arne Jansen7a262852011-06-10 12:39:23 +02001928 mutex_lock(&fs_info->scrub_lock);
1929 while (atomic_read(&fs_info->scrub_pause_req)) {
1930 mutex_unlock(&fs_info->scrub_lock);
1931 wait_event(fs_info->scrub_pause_wait,
1932 atomic_read(&fs_info->scrub_pause_req) == 0);
1933 mutex_lock(&fs_info->scrub_lock);
Arne Jansena2de7332011-03-08 14:14:00 +01001934 }
Arne Jansen7a262852011-06-10 12:39:23 +02001935 atomic_dec(&fs_info->scrubs_paused);
1936 mutex_unlock(&fs_info->scrub_lock);
1937 wake_up(&fs_info->scrub_pause_wait);
Arne Jansena2de7332011-03-08 14:14:00 +01001938
1939 /*
1940 * collect all data csums for the stripe to avoid seeking during
1941	 * the scrub. With crc32, this currently adds up to about 1MB
1942 */
Arne Jansene7786c32011-05-28 20:58:38 +00001943 blk_start_plug(&plug);
Arne Jansena2de7332011-03-08 14:14:00 +01001944
Arne Jansena2de7332011-03-08 14:14:00 +01001945 /*
1946 * now find all extents for each stripe and scrub them
1947 */
Arne Jansen7a262852011-06-10 12:39:23 +02001948 logical = base + offset;
1949 physical = map->stripes[num].physical;
Arne Jansena2de7332011-03-08 14:14:00 +01001950 ret = 0;
Arne Jansen7a262852011-06-10 12:39:23 +02001951 for (i = 0; i < nstripes; ++i) {
Arne Jansena2de7332011-03-08 14:14:00 +01001952 /*
1953 * canceled?
1954 */
1955 if (atomic_read(&fs_info->scrub_cancel_req) ||
Stefan Behrensd9d181c2012-11-02 09:58:09 +01001956 atomic_read(&sctx->cancel_req)) {
Arne Jansena2de7332011-03-08 14:14:00 +01001957 ret = -ECANCELED;
1958 goto out;
1959 }
1960 /*
1961 * check to see if we have to pause
1962 */
1963 if (atomic_read(&fs_info->scrub_pause_req)) {
1964 /* push queued extents */
Stefan Behrensd9d181c2012-11-02 09:58:09 +01001965 scrub_submit(sctx);
1966 wait_event(sctx->list_wait,
Stefan Behrensb6bfebc2012-11-02 16:44:58 +01001967 atomic_read(&sctx->bios_in_flight) == 0);
Arne Jansena2de7332011-03-08 14:14:00 +01001968 atomic_inc(&fs_info->scrubs_paused);
1969 wake_up(&fs_info->scrub_pause_wait);
1970 mutex_lock(&fs_info->scrub_lock);
1971 while (atomic_read(&fs_info->scrub_pause_req)) {
1972 mutex_unlock(&fs_info->scrub_lock);
1973 wait_event(fs_info->scrub_pause_wait,
1974 atomic_read(&fs_info->scrub_pause_req) == 0);
1975 mutex_lock(&fs_info->scrub_lock);
1976 }
1977 atomic_dec(&fs_info->scrubs_paused);
1978 mutex_unlock(&fs_info->scrub_lock);
1979 wake_up(&fs_info->scrub_pause_wait);
Arne Jansena2de7332011-03-08 14:14:00 +01001980 }
1981
Arne Jansen7a262852011-06-10 12:39:23 +02001982 ret = btrfs_lookup_csums_range(csum_root, logical,
1983 logical + map->stripe_len - 1,
Stefan Behrensd9d181c2012-11-02 09:58:09 +01001984 &sctx->csum_list, 1);
Arne Jansen7a262852011-06-10 12:39:23 +02001985 if (ret)
1986 goto out;
1987
Arne Jansena2de7332011-03-08 14:14:00 +01001988 key.objectid = logical;
1989 key.type = BTRFS_EXTENT_ITEM_KEY;
1990 key.offset = (u64)0;
1991
1992 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
1993 if (ret < 0)
1994 goto out;
Arne Jansen8c510322011-06-03 10:09:26 +02001995 if (ret > 0) {
Arne Jansena2de7332011-03-08 14:14:00 +01001996 ret = btrfs_previous_item(root, path, 0,
1997 BTRFS_EXTENT_ITEM_KEY);
1998 if (ret < 0)
1999 goto out;
Arne Jansen8c510322011-06-03 10:09:26 +02002000 if (ret > 0) {
2001 /* there's no smaller item, so stick with the
2002 * larger one */
2003 btrfs_release_path(path);
2004 ret = btrfs_search_slot(NULL, root, &key,
2005 path, 0, 0);
2006 if (ret < 0)
2007 goto out;
2008 }
Arne Jansena2de7332011-03-08 14:14:00 +01002009 }
2010
2011 while (1) {
2012 l = path->nodes[0];
2013 slot = path->slots[0];
2014 if (slot >= btrfs_header_nritems(l)) {
2015 ret = btrfs_next_leaf(root, path);
2016 if (ret == 0)
2017 continue;
2018 if (ret < 0)
2019 goto out;
2020
2021 break;
2022 }
2023 btrfs_item_key_to_cpu(l, &key, slot);
2024
2025 if (key.objectid + key.offset <= logical)
2026 goto next;
2027
2028 if (key.objectid >= logical + map->stripe_len)
2029 break;
2030
2031 if (btrfs_key_type(&key) != BTRFS_EXTENT_ITEM_KEY)
2032 goto next;
2033
2034 extent = btrfs_item_ptr(l, slot,
2035 struct btrfs_extent_item);
2036 flags = btrfs_extent_flags(l, extent);
2037 generation = btrfs_extent_generation(l, extent);
2038
2039 if (key.objectid < logical &&
2040 (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK)) {
2041 printk(KERN_ERR
2042 "btrfs scrub: tree block %llu spanning "
2043 "stripes, ignored. logical=%llu\n",
2044 (unsigned long long)key.objectid,
2045 (unsigned long long)logical);
2046 goto next;
2047 }
2048
2049 /*
2050 * trim extent to this stripe
2051 */
2052 if (key.objectid < logical) {
2053 key.offset -= logical - key.objectid;
2054 key.objectid = logical;
2055 }
2056 if (key.objectid + key.offset >
2057 logical + map->stripe_len) {
2058 key.offset = logical + map->stripe_len -
2059 key.objectid;
2060 }
2061
Stefan Behrensd9d181c2012-11-02 09:58:09 +01002062 ret = scrub_extent(sctx, key.objectid, key.offset,
Arne Jansena2de7332011-03-08 14:14:00 +01002063 key.objectid - logical + physical,
Stefan Behrensa36cf8b2012-11-02 13:26:57 +01002064 scrub_dev, flags, generation,
2065 mirror_num);
Arne Jansena2de7332011-03-08 14:14:00 +01002066 if (ret)
2067 goto out;
2068
2069next:
2070 path->slots[0]++;
2071 }
Chris Mason71267332011-05-23 06:30:52 -04002072 btrfs_release_path(path);
Arne Jansena2de7332011-03-08 14:14:00 +01002073 logical += increment;
2074 physical += map->stripe_len;
Stefan Behrensd9d181c2012-11-02 09:58:09 +01002075 spin_lock(&sctx->stat_lock);
2076 sctx->stat.last_physical = physical;
2077 spin_unlock(&sctx->stat_lock);
Arne Jansena2de7332011-03-08 14:14:00 +01002078 }
2079 /* push queued extents */
Stefan Behrensd9d181c2012-11-02 09:58:09 +01002080 scrub_submit(sctx);
Arne Jansena2de7332011-03-08 14:14:00 +01002081
2082out:
Arne Jansene7786c32011-05-28 20:58:38 +00002083 blk_finish_plug(&plug);
Arne Jansena2de7332011-03-08 14:14:00 +01002084 btrfs_free_path(path);
2085 return ret < 0 ? ret : 0;
2086}
2087
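/*
 * map the chunk at chunk_offset and scrub the stripe of it that lives
 * on scrub_dev at dev_offset
 */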
Stefan Behrensd9d181c2012-11-02 09:58:09 +01002088static noinline_for_stack int scrub_chunk(struct scrub_ctx *sctx,
Stefan Behrensa36cf8b2012-11-02 13:26:57 +01002089 struct btrfs_device *scrub_dev,
2090 u64 chunk_tree, u64 chunk_objectid,
2091 u64 chunk_offset, u64 length,
2092 u64 dev_offset)
Arne Jansena2de7332011-03-08 14:14:00 +01002093{
2094 struct btrfs_mapping_tree *map_tree =
Stefan Behrensa36cf8b2012-11-02 13:26:57 +01002095 &sctx->dev_root->fs_info->mapping_tree;
Arne Jansena2de7332011-03-08 14:14:00 +01002096 struct map_lookup *map;
2097 struct extent_map *em;
2098 int i;
2099 int ret = -EINVAL;
2100
2101 read_lock(&map_tree->map_tree.lock);
2102 em = lookup_extent_mapping(&map_tree->map_tree, chunk_offset, 1);
2103 read_unlock(&map_tree->map_tree.lock);
2104
2105 if (!em)
2106 return -EINVAL;
2107
2108 map = (struct map_lookup *)em->bdev;
2109 if (em->start != chunk_offset)
2110 goto out;
2111
2112 if (em->len < length)
2113 goto out;
2114
2115 for (i = 0; i < map->num_stripes; ++i) {
Stefan Behrensa36cf8b2012-11-02 13:26:57 +01002116 if (map->stripes[i].dev->bdev == scrub_dev->bdev &&
Arne Jansen859acaf2012-02-09 15:09:02 +01002117 map->stripes[i].physical == dev_offset) {
Stefan Behrensa36cf8b2012-11-02 13:26:57 +01002118 ret = scrub_stripe(sctx, map, scrub_dev, i,
2119 chunk_offset, length);
Arne Jansena2de7332011-03-08 14:14:00 +01002120 if (ret)
2121 goto out;
2122 }
2123 }
2124out:
2125 free_extent_map(em);
2126
2127 return ret;
2128}
2129
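/*
 * walk all dev extents of scrub_dev that overlap [start, end) and
 * scrub the chunks they belong to
 */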
2130static noinline_for_stack
Stefan Behrensa36cf8b2012-11-02 13:26:57 +01002131int scrub_enumerate_chunks(struct scrub_ctx *sctx,
2132 struct btrfs_device *scrub_dev, u64 start, u64 end)
Arne Jansena2de7332011-03-08 14:14:00 +01002133{
2134 struct btrfs_dev_extent *dev_extent = NULL;
2135 struct btrfs_path *path;
Stefan Behrensa36cf8b2012-11-02 13:26:57 +01002136 struct btrfs_root *root = sctx->dev_root;
Arne Jansena2de7332011-03-08 14:14:00 +01002137 struct btrfs_fs_info *fs_info = root->fs_info;
2138 u64 length;
2139 u64 chunk_tree;
2140 u64 chunk_objectid;
2141 u64 chunk_offset;
2142 int ret;
2143 int slot;
2144 struct extent_buffer *l;
2145 struct btrfs_key key;
2146 struct btrfs_key found_key;
2147 struct btrfs_block_group_cache *cache;
2148
2149 path = btrfs_alloc_path();
2150 if (!path)
2151 return -ENOMEM;
2152
2153 path->reada = 2;
2154 path->search_commit_root = 1;
2155 path->skip_locking = 1;
2156
Stefan Behrensa36cf8b2012-11-02 13:26:57 +01002157 key.objectid = scrub_dev->devid;
Arne Jansena2de7332011-03-08 14:14:00 +01002158 key.offset = 0ull;
2159 key.type = BTRFS_DEV_EXTENT_KEY;
2160
Arne Jansena2de7332011-03-08 14:14:00 +01002161 while (1) {
2162 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
2163 if (ret < 0)
Arne Jansen8c510322011-06-03 10:09:26 +02002164 break;
2165 if (ret > 0) {
2166 if (path->slots[0] >=
2167 btrfs_header_nritems(path->nodes[0])) {
2168 ret = btrfs_next_leaf(root, path);
2169 if (ret)
2170 break;
2171 }
2172 }
Arne Jansena2de7332011-03-08 14:14:00 +01002173
2174 l = path->nodes[0];
2175 slot = path->slots[0];
2176
2177 btrfs_item_key_to_cpu(l, &found_key, slot);
2178
Stefan Behrensa36cf8b2012-11-02 13:26:57 +01002179 if (found_key.objectid != scrub_dev->devid)
Arne Jansena2de7332011-03-08 14:14:00 +01002180 break;
2181
Arne Jansen8c510322011-06-03 10:09:26 +02002182 if (btrfs_key_type(&found_key) != BTRFS_DEV_EXTENT_KEY)
Arne Jansena2de7332011-03-08 14:14:00 +01002183 break;
2184
2185 if (found_key.offset >= end)
2186 break;
2187
2188 if (found_key.offset < key.offset)
2189 break;
2190
2191 dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
2192 length = btrfs_dev_extent_length(l, dev_extent);
2193
2194 if (found_key.offset + length <= start) {
2195 key.offset = found_key.offset + length;
Chris Mason71267332011-05-23 06:30:52 -04002196 btrfs_release_path(path);
Arne Jansena2de7332011-03-08 14:14:00 +01002197 continue;
2198 }
2199
2200 chunk_tree = btrfs_dev_extent_chunk_tree(l, dev_extent);
2201 chunk_objectid = btrfs_dev_extent_chunk_objectid(l, dev_extent);
2202 chunk_offset = btrfs_dev_extent_chunk_offset(l, dev_extent);
2203
2204 /*
2205 * get a reference on the corresponding block group to prevent
2206 * the chunk from going away while we scrub it
2207 */
2208 cache = btrfs_lookup_block_group(fs_info, chunk_offset);
2209 if (!cache) {
2210 ret = -ENOENT;
Arne Jansen8c510322011-06-03 10:09:26 +02002211 break;
Arne Jansena2de7332011-03-08 14:14:00 +01002212 }
Stefan Behrensa36cf8b2012-11-02 13:26:57 +01002213 ret = scrub_chunk(sctx, scrub_dev, chunk_tree, chunk_objectid,
Arne Jansen859acaf2012-02-09 15:09:02 +01002214 chunk_offset, length, found_key.offset);
Arne Jansena2de7332011-03-08 14:14:00 +01002215 btrfs_put_block_group(cache);
2216 if (ret)
2217 break;
2218
2219 key.offset = found_key.offset + length;
Chris Mason71267332011-05-23 06:30:52 -04002220 btrfs_release_path(path);
Arne Jansena2de7332011-03-08 14:14:00 +01002221 }
2222
Arne Jansena2de7332011-03-08 14:14:00 +01002223 btrfs_free_path(path);
Arne Jansen8c510322011-06-03 10:09:26 +02002224
2225 /*
2226 * ret can still be 1 from search_slot or next_leaf,
2227 * that's not an error
2228 */
2229 return ret < 0 ? ret : 0;
Arne Jansena2de7332011-03-08 14:14:00 +01002230}
2231
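/* scrub all superblock copies that fit on the device */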
Stefan Behrensa36cf8b2012-11-02 13:26:57 +01002232static noinline_for_stack int scrub_supers(struct scrub_ctx *sctx,
2233 struct btrfs_device *scrub_dev)
Arne Jansena2de7332011-03-08 14:14:00 +01002234{
2235 int i;
2236 u64 bytenr;
2237 u64 gen;
2238 int ret;
Stefan Behrensa36cf8b2012-11-02 13:26:57 +01002239 struct btrfs_root *root = sctx->dev_root;
Arne Jansena2de7332011-03-08 14:14:00 +01002240
Jeff Mahoney79787ea2012-03-12 16:03:00 +01002241 if (root->fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR)
2242 return -EIO;
2243
Arne Jansena2de7332011-03-08 14:14:00 +01002244 gen = root->fs_info->last_trans_committed;
2245
2246 for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) {
2247 bytenr = btrfs_sb_offset(i);
Stefan Behrensa36cf8b2012-11-02 13:26:57 +01002248 if (bytenr + BTRFS_SUPER_INFO_SIZE > scrub_dev->total_bytes)
Arne Jansena2de7332011-03-08 14:14:00 +01002249 break;
2250
Stefan Behrensd9d181c2012-11-02 09:58:09 +01002251 ret = scrub_pages(sctx, bytenr, BTRFS_SUPER_INFO_SIZE, bytenr,
Stefan Behrensa36cf8b2012-11-02 13:26:57 +01002252 scrub_dev, BTRFS_EXTENT_FLAG_SUPER, gen, i,
2253 NULL, 1);
Arne Jansena2de7332011-03-08 14:14:00 +01002254 if (ret)
2255 return ret;
2256 }
Stefan Behrensb6bfebc2012-11-02 16:44:58 +01002257 wait_event(sctx->list_wait, atomic_read(&sctx->bios_in_flight) == 0);
Arne Jansena2de7332011-03-08 14:14:00 +01002258
2259 return 0;
2260}
2261
2262/*
2263 * get a reference count on fs_info->scrub_workers. Start the worker threads if necessary.
2264 */
2265static noinline_for_stack int scrub_workers_get(struct btrfs_root *root)
2266{
2267 struct btrfs_fs_info *fs_info = root->fs_info;
Josef Bacik0dc3b842011-11-18 14:37:27 -05002268 int ret = 0;
Arne Jansena2de7332011-03-08 14:14:00 +01002269
2270 mutex_lock(&fs_info->scrub_lock);
Arne Jansen632dd772011-06-10 12:07:07 +02002271 if (fs_info->scrub_workers_refcnt == 0) {
2272 btrfs_init_workers(&fs_info->scrub_workers, "scrub",
2273 fs_info->thread_pool_size, &fs_info->generic_worker);
2274 fs_info->scrub_workers.idle_thresh = 4;
Josef Bacik0dc3b842011-11-18 14:37:27 -05002275 ret = btrfs_start_workers(&fs_info->scrub_workers);
2276 if (ret)
2277 goto out;
Arne Jansen632dd772011-06-10 12:07:07 +02002278 }
Arne Jansena2de7332011-03-08 14:14:00 +01002279 ++fs_info->scrub_workers_refcnt;
Josef Bacik0dc3b842011-11-18 14:37:27 -05002280out:
Arne Jansena2de7332011-03-08 14:14:00 +01002281 mutex_unlock(&fs_info->scrub_lock);
2282
Josef Bacik0dc3b842011-11-18 14:37:27 -05002283 return ret;
Arne Jansena2de7332011-03-08 14:14:00 +01002284}
2285
2286static noinline_for_stack void scrub_workers_put(struct btrfs_root *root)
2287{
2288 struct btrfs_fs_info *fs_info = root->fs_info;
2289
2290 mutex_lock(&fs_info->scrub_lock);
2291 if (--fs_info->scrub_workers_refcnt == 0)
2292 btrfs_stop_workers(&fs_info->scrub_workers);
2293 WARN_ON(fs_info->scrub_workers_refcnt < 0);
2294 mutex_unlock(&fs_info->scrub_lock);
2295}
2296
2297
2298int btrfs_scrub_dev(struct btrfs_root *root, u64 devid, u64 start, u64 end,
Arne Jansen86287642011-03-23 16:34:19 +01002299 struct btrfs_scrub_progress *progress, int readonly)
Arne Jansena2de7332011-03-08 14:14:00 +01002300{
Stefan Behrensd9d181c2012-11-02 09:58:09 +01002301 struct scrub_ctx *sctx;
Arne Jansena2de7332011-03-08 14:14:00 +01002302 struct btrfs_fs_info *fs_info = root->fs_info;
2303 int ret;
2304 struct btrfs_device *dev;
2305
David Sterba7841cb22011-05-31 18:07:27 +02002306 if (btrfs_fs_closing(root->fs_info))
Arne Jansena2de7332011-03-08 14:14:00 +01002307 return -EINVAL;
2308
2309 /*
2310 * check some assumptions
2311 */
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002312 if (root->nodesize != root->leafsize) {
2313 printk(KERN_ERR
2314 "btrfs_scrub: size assumption nodesize == leafsize (%d == %d) fails\n",
2315 root->nodesize, root->leafsize);
2316 return -EINVAL;
2317 }
2318
2319 if (root->nodesize > BTRFS_STRIPE_LEN) {
2320 /*
2321		 * the way scrub is implemented, it is unable to calculate the
2322		 * checksum for nodes this large. Do not handle this situation
2323		 * at all because it won't ever happen.
2324 */
2325 printk(KERN_ERR
2326 "btrfs_scrub: size assumption nodesize <= BTRFS_STRIPE_LEN (%d <= %d) fails\n",
2327 root->nodesize, BTRFS_STRIPE_LEN);
2328 return -EINVAL;
2329 }
2330
2331 if (root->sectorsize != PAGE_SIZE) {
2332 /* not supported for data w/o checksums */
2333 printk(KERN_ERR
2334 "btrfs_scrub: size assumption sectorsize != PAGE_SIZE (%d != %lld) fails\n",
2335 root->sectorsize, (unsigned long long)PAGE_SIZE);
Arne Jansena2de7332011-03-08 14:14:00 +01002336 return -EINVAL;
2337 }
2338
Stefan Behrens7a9e9982012-11-02 14:58:04 +01002339 if (fs_info->chunk_root->nodesize >
2340 PAGE_SIZE * SCRUB_MAX_PAGES_PER_BLOCK ||
2341 fs_info->chunk_root->sectorsize >
2342 PAGE_SIZE * SCRUB_MAX_PAGES_PER_BLOCK) {
2343 /*
2344 * would exhaust the array bounds of pagev member in
2345 * struct scrub_block
2346 */
2347 pr_err("btrfs_scrub: size assumption nodesize and sectorsize <= SCRUB_MAX_PAGES_PER_BLOCK (%d <= %d && %d <= %d) fails\n",
2348 fs_info->chunk_root->nodesize,
2349 SCRUB_MAX_PAGES_PER_BLOCK,
2350 fs_info->chunk_root->sectorsize,
2351 SCRUB_MAX_PAGES_PER_BLOCK);
2352 return -EINVAL;
2353 }
2354
Arne Jansena2de7332011-03-08 14:14:00 +01002355 ret = scrub_workers_get(root);
2356 if (ret)
2357 return ret;
2358
2359 mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
2360 dev = btrfs_find_device(root, devid, NULL, NULL);
2361 if (!dev || dev->missing) {
2362 mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
2363 scrub_workers_put(root);
2364 return -ENODEV;
2365 }
2366 mutex_lock(&fs_info->scrub_lock);
2367
2368 if (!dev->in_fs_metadata) {
2369 mutex_unlock(&fs_info->scrub_lock);
2370 mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
2371 scrub_workers_put(root);
2372 return -ENODEV;
2373 }
2374
2375 if (dev->scrub_device) {
2376 mutex_unlock(&fs_info->scrub_lock);
2377 mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
2378 scrub_workers_put(root);
2379 return -EINPROGRESS;
2380 }
Stefan Behrensd9d181c2012-11-02 09:58:09 +01002381 sctx = scrub_setup_ctx(dev);
2382 if (IS_ERR(sctx)) {
Arne Jansena2de7332011-03-08 14:14:00 +01002383 mutex_unlock(&fs_info->scrub_lock);
2384 mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
2385 scrub_workers_put(root);
Stefan Behrensd9d181c2012-11-02 09:58:09 +01002386 return PTR_ERR(sctx);
Arne Jansena2de7332011-03-08 14:14:00 +01002387 }
Stefan Behrensd9d181c2012-11-02 09:58:09 +01002388 sctx->readonly = readonly;
2389 dev->scrub_device = sctx;
Arne Jansena2de7332011-03-08 14:14:00 +01002390
2391 atomic_inc(&fs_info->scrubs_running);
2392 mutex_unlock(&fs_info->scrub_lock);
2393 mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
2394
2395 down_read(&fs_info->scrub_super_lock);
Stefan Behrensa36cf8b2012-11-02 13:26:57 +01002396 ret = scrub_supers(sctx, dev);
Arne Jansena2de7332011-03-08 14:14:00 +01002397 up_read(&fs_info->scrub_super_lock);
2398
2399 if (!ret)
Stefan Behrensa36cf8b2012-11-02 13:26:57 +01002400 ret = scrub_enumerate_chunks(sctx, dev, start, end);
Arne Jansena2de7332011-03-08 14:14:00 +01002401
Stefan Behrensb6bfebc2012-11-02 16:44:58 +01002402 wait_event(sctx->list_wait, atomic_read(&sctx->bios_in_flight) == 0);
Arne Jansena2de7332011-03-08 14:14:00 +01002403 atomic_dec(&fs_info->scrubs_running);
2404 wake_up(&fs_info->scrub_pause_wait);
2405
Stefan Behrensb6bfebc2012-11-02 16:44:58 +01002406 wait_event(sctx->list_wait, atomic_read(&sctx->workers_pending) == 0);
Jan Schmidt0ef8e452011-06-13 20:04:15 +02002407
Arne Jansena2de7332011-03-08 14:14:00 +01002408 if (progress)
Stefan Behrensd9d181c2012-11-02 09:58:09 +01002409 memcpy(progress, &sctx->stat, sizeof(*progress));
Arne Jansena2de7332011-03-08 14:14:00 +01002410
2411 mutex_lock(&fs_info->scrub_lock);
2412 dev->scrub_device = NULL;
2413 mutex_unlock(&fs_info->scrub_lock);
2414
Stefan Behrensd9d181c2012-11-02 09:58:09 +01002415 scrub_free_ctx(sctx);
Arne Jansena2de7332011-03-08 14:14:00 +01002416 scrub_workers_put(root);
2417
2418 return ret;
2419}
2420
Jeff Mahoney143bede2012-03-01 14:56:26 +01002421void btrfs_scrub_pause(struct btrfs_root *root)
Arne Jansena2de7332011-03-08 14:14:00 +01002422{
2423 struct btrfs_fs_info *fs_info = root->fs_info;
2424
2425 mutex_lock(&fs_info->scrub_lock);
2426 atomic_inc(&fs_info->scrub_pause_req);
2427 while (atomic_read(&fs_info->scrubs_paused) !=
2428 atomic_read(&fs_info->scrubs_running)) {
2429 mutex_unlock(&fs_info->scrub_lock);
2430 wait_event(fs_info->scrub_pause_wait,
2431 atomic_read(&fs_info->scrubs_paused) ==
2432 atomic_read(&fs_info->scrubs_running));
2433 mutex_lock(&fs_info->scrub_lock);
2434 }
2435 mutex_unlock(&fs_info->scrub_lock);
Arne Jansena2de7332011-03-08 14:14:00 +01002436}
2437
Jeff Mahoney143bede2012-03-01 14:56:26 +01002438void btrfs_scrub_continue(struct btrfs_root *root)
Arne Jansena2de7332011-03-08 14:14:00 +01002439{
2440 struct btrfs_fs_info *fs_info = root->fs_info;
2441
2442 atomic_dec(&fs_info->scrub_pause_req);
2443 wake_up(&fs_info->scrub_pause_wait);
Arne Jansena2de7332011-03-08 14:14:00 +01002444}
2445
Jeff Mahoney143bede2012-03-01 14:56:26 +01002446void btrfs_scrub_pause_super(struct btrfs_root *root)
Arne Jansena2de7332011-03-08 14:14:00 +01002447{
2448 down_write(&root->fs_info->scrub_super_lock);
Arne Jansena2de7332011-03-08 14:14:00 +01002449}
2450
Jeff Mahoney143bede2012-03-01 14:56:26 +01002451void btrfs_scrub_continue_super(struct btrfs_root *root)
Arne Jansena2de7332011-03-08 14:14:00 +01002452{
2453 up_write(&root->fs_info->scrub_super_lock);
Arne Jansena2de7332011-03-08 14:14:00 +01002454}
2455
Jeff Mahoney49b25e02012-03-01 17:24:58 +01002456int __btrfs_scrub_cancel(struct btrfs_fs_info *fs_info)
Arne Jansena2de7332011-03-08 14:14:00 +01002457{
Arne Jansena2de7332011-03-08 14:14:00 +01002458
2459 mutex_lock(&fs_info->scrub_lock);
2460 if (!atomic_read(&fs_info->scrubs_running)) {
2461 mutex_unlock(&fs_info->scrub_lock);
2462 return -ENOTCONN;
2463 }
2464
2465 atomic_inc(&fs_info->scrub_cancel_req);
2466 while (atomic_read(&fs_info->scrubs_running)) {
2467 mutex_unlock(&fs_info->scrub_lock);
2468 wait_event(fs_info->scrub_pause_wait,
2469 atomic_read(&fs_info->scrubs_running) == 0);
2470 mutex_lock(&fs_info->scrub_lock);
2471 }
2472 atomic_dec(&fs_info->scrub_cancel_req);
2473 mutex_unlock(&fs_info->scrub_lock);
2474
2475 return 0;
2476}
2477
Jeff Mahoney49b25e02012-03-01 17:24:58 +01002478int btrfs_scrub_cancel(struct btrfs_root *root)
2479{
2480 return __btrfs_scrub_cancel(root->fs_info);
2481}
2482
Arne Jansena2de7332011-03-08 14:14:00 +01002483int btrfs_scrub_cancel_dev(struct btrfs_root *root, struct btrfs_device *dev)
2484{
2485 struct btrfs_fs_info *fs_info = root->fs_info;
Stefan Behrensd9d181c2012-11-02 09:58:09 +01002486 struct scrub_ctx *sctx;
Arne Jansena2de7332011-03-08 14:14:00 +01002487
2488 mutex_lock(&fs_info->scrub_lock);
Stefan Behrensd9d181c2012-11-02 09:58:09 +01002489 sctx = dev->scrub_device;
2490 if (!sctx) {
Arne Jansena2de7332011-03-08 14:14:00 +01002491 mutex_unlock(&fs_info->scrub_lock);
2492 return -ENOTCONN;
2493 }
Stefan Behrensd9d181c2012-11-02 09:58:09 +01002494 atomic_inc(&sctx->cancel_req);
Arne Jansena2de7332011-03-08 14:14:00 +01002495 while (dev->scrub_device) {
2496 mutex_unlock(&fs_info->scrub_lock);
2497 wait_event(fs_info->scrub_pause_wait,
2498 dev->scrub_device == NULL);
2499 mutex_lock(&fs_info->scrub_lock);
2500 }
2501 mutex_unlock(&fs_info->scrub_lock);
2502
2503 return 0;
2504}
Stefan Behrens1623ede2012-03-27 14:21:26 -04002505
Arne Jansena2de7332011-03-08 14:14:00 +01002506int btrfs_scrub_cancel_devid(struct btrfs_root *root, u64 devid)
2507{
2508 struct btrfs_fs_info *fs_info = root->fs_info;
2509 struct btrfs_device *dev;
2510 int ret;
2511
2512 /*
2513 * we have to hold the device_list_mutex here so the device
2514 * does not go away in cancel_dev. FIXME: find a better solution
2515 */
2516 mutex_lock(&fs_info->fs_devices->device_list_mutex);
2517 dev = btrfs_find_device(root, devid, NULL, NULL);
2518 if (!dev) {
2519 mutex_unlock(&fs_info->fs_devices->device_list_mutex);
2520 return -ENODEV;
2521 }
2522 ret = btrfs_scrub_cancel_dev(root, dev);
2523 mutex_unlock(&fs_info->fs_devices->device_list_mutex);
2524
2525 return ret;
2526}
2527
2528int btrfs_scrub_progress(struct btrfs_root *root, u64 devid,
2529 struct btrfs_scrub_progress *progress)
2530{
2531 struct btrfs_device *dev;
Stefan Behrensd9d181c2012-11-02 09:58:09 +01002532 struct scrub_ctx *sctx = NULL;
Arne Jansena2de7332011-03-08 14:14:00 +01002533
2534 mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
2535 dev = btrfs_find_device(root, devid, NULL, NULL);
2536 if (dev)
Stefan Behrensd9d181c2012-11-02 09:58:09 +01002537 sctx = dev->scrub_device;
2538 if (sctx)
2539 memcpy(progress, &sctx->stat, sizeof(*progress));
Arne Jansena2de7332011-03-08 14:14:00 +01002540 mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
2541
Stefan Behrensd9d181c2012-11-02 09:58:09 +01002542 return dev ? (sctx ? 0 : -ENOTCONN) : -ENODEV;
Arne Jansena2de7332011-03-08 14:14:00 +01002543}