/*
 * Copyright (C) 2011 STRATO. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */

#include <linux/sched.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/rbtree.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include "ctree.h"
#include "volumes.h"
#include "disk-io.h"
#include "ordered-data.h"

/*
 * This is only the first step towards a fully featured scrub. It reads all
 * extents and super blocks and verifies the checksums. In case a bad checksum
 * is found or the extent cannot be read, good data will be written back if
 * any can be found.
 *
 * Future enhancements:
 *  - To enhance the performance, better read-ahead strategies for the
 *    extent-tree can be employed.
 *  - In case an unrepairable extent is encountered, track which files are
 *    affected and report them.
 *  - In case of a read error on files with nodatasum, map the file and read
 *    the extent to trigger a writeback of the good copy.
 *  - Track and record media errors, throw out bad devices.
 *  - Add a readonly mode.
 *  - Add a mode to also read unallocated space.
 *  - Make the prefetch cancellable.
 */
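
/*
 * Overall flow, as implemented below: btrfs_scrub_dev() first checks all
 * super block copies via scrub_supers() and then walks the dev extents of
 * the device with scrub_enumerate_chunks(). scrub_chunk() maps a chunk to
 * its stripes, scrub_stripe() collects the data csums and enumerates the
 * extents of one stripe, scrub_extent() splits an extent into pages and
 * scrub_page() queues each page into a scrub_bio, which scrub_submit()
 * finally sends down. On completion, scrub_bio_end_io() defers to the
 * worker scrub_checksum(), which verifies every page and hands failures
 * to scrub_recheck_error().
 */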

struct scrub_bio;
struct scrub_page;
struct scrub_dev;
static void scrub_bio_end_io(struct bio *bio, int err);
static void scrub_checksum(struct btrfs_work *work);
static int scrub_checksum_data(struct scrub_dev *sdev,
                               struct scrub_page *spag, void *buffer);
static int scrub_checksum_tree_block(struct scrub_dev *sdev,
                                     struct scrub_page *spag, u64 logical,
                                     void *buffer);
static int scrub_checksum_super(struct scrub_bio *sbio, void *buffer);
static int scrub_fixup_check(struct scrub_bio *sbio, int ix);
static void scrub_fixup_end_io(struct bio *bio, int err);
static int scrub_fixup_io(int rw, struct block_device *bdev, sector_t sector,
                          struct page *page);
static void scrub_fixup(struct scrub_bio *sbio, int ix);

#define SCRUB_PAGES_PER_BIO     16      /* 64k per bio */
#define SCRUB_BIOS_PER_DEV      16      /* 1 MB per device in flight */

struct scrub_page {
        u64                     flags;  /* extent flags */
        u64                     generation;
        u64                     mirror_num;
        int                     have_csum;
        u8                      csum[BTRFS_CSUM_SIZE];
};

struct scrub_bio {
        int                     index;
        struct scrub_dev        *sdev;
        struct bio              *bio;
        int                     err;
        u64                     logical;
        u64                     physical;
        struct scrub_page       spag[SCRUB_PAGES_PER_BIO];
        u64                     count;
        int                     next_free;
        struct btrfs_work       work;
};

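/*
 * A scrub_dev keeps its idle scrub_bios on a simple free list: first_free
 * indexes the first unused bio, and each scrub_bio chains to the next one
 * through next_free. curr is the index of the bio currently being filled,
 * or -1 if there is none. Finished bios are pushed back to the head of the
 * list under list_lock, and waiters on list_wait are woken up.
 */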
struct scrub_dev {
        struct scrub_bio        *bios[SCRUB_BIOS_PER_DEV];
        struct btrfs_device     *dev;
        int                     first_free;
        int                     curr;
        atomic_t                in_flight;
        spinlock_t              list_lock;
        wait_queue_head_t       list_wait;
        u16                     csum_size;
        struct list_head        csum_list;
        atomic_t                cancel_req;
        /*
         * statistics
         */
        struct btrfs_scrub_progress stat;
        spinlock_t              stat_lock;
};

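/* drop all csums still queued on the list of the device being scrubbed */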
static void scrub_free_csums(struct scrub_dev *sdev)
{
        while (!list_empty(&sdev->csum_list)) {
                struct btrfs_ordered_sum *sum;
                sum = list_first_entry(&sdev->csum_list,
                                       struct btrfs_ordered_sum, list);
                list_del(&sum->list);
                kfree(sum);
        }
}

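/*
 * free a scrub_dev with all its bios. Adjacent bio_vecs may point to the
 * same page, so take care to free each distinct page only once.
 */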
static noinline_for_stack void scrub_free_dev(struct scrub_dev *sdev)
{
        int i;
        int j;
        struct page *last_page;

        if (!sdev)
                return;

        for (i = 0; i < SCRUB_BIOS_PER_DEV; ++i) {
                struct scrub_bio *sbio = sdev->bios[i];
                struct bio *bio;

                if (!sbio)
                        break;

                bio = sbio->bio;
                if (bio) {
                        last_page = NULL;
                        for (j = 0; j < bio->bi_vcnt; ++j) {
                                if (bio->bi_io_vec[j].bv_page == last_page)
                                        continue;
                                last_page = bio->bi_io_vec[j].bv_page;
                                __free_page(last_page);
                        }
                        bio_put(bio);
                }
                kfree(sbio);
        }

        scrub_free_csums(sdev);
        kfree(sdev);
}

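/*
 * allocate a scrub_dev for @dev with SCRUB_BIOS_PER_DEV preallocated bios,
 * each populated with SCRUB_PAGES_PER_BIO pages, and chain them all on the
 * free list. Returns ERR_PTR(-ENOMEM) if any allocation fails.
 */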
static noinline_for_stack
struct scrub_dev *scrub_setup_dev(struct btrfs_device *dev)
{
        struct scrub_dev *sdev;
        int i;
        int j;
        int ret;
        struct btrfs_fs_info *fs_info = dev->dev_root->fs_info;

        sdev = kzalloc(sizeof(*sdev), GFP_NOFS);
        if (!sdev)
                goto nomem;
        sdev->dev = dev;
        for (i = 0; i < SCRUB_BIOS_PER_DEV; ++i) {
                struct bio *bio;
                struct scrub_bio *sbio;

                sbio = kzalloc(sizeof(*sbio), GFP_NOFS);
                if (!sbio)
                        goto nomem;
                sdev->bios[i] = sbio;

                bio = bio_kmalloc(GFP_NOFS, SCRUB_PAGES_PER_BIO);
                if (!bio)
                        goto nomem;

                sbio->index = i;
                sbio->sdev = sdev;
                sbio->bio = bio;
                sbio->count = 0;
                sbio->work.func = scrub_checksum;
                bio->bi_private = sdev->bios[i];
                bio->bi_end_io = scrub_bio_end_io;
                bio->bi_sector = 0;
                bio->bi_bdev = dev->bdev;
                bio->bi_size = 0;

                for (j = 0; j < SCRUB_PAGES_PER_BIO; ++j) {
                        struct page *page;
                        page = alloc_page(GFP_NOFS);
                        if (!page)
                                goto nomem;

                        ret = bio_add_page(bio, page, PAGE_SIZE, 0);
                        if (!ret)
                                goto nomem;
                }
                WARN_ON(bio->bi_vcnt != SCRUB_PAGES_PER_BIO);

                if (i != SCRUB_BIOS_PER_DEV-1)
                        sdev->bios[i]->next_free = i + 1;
                else
                        sdev->bios[i]->next_free = -1;
        }
        sdev->first_free = 0;
        sdev->curr = -1;
        atomic_set(&sdev->in_flight, 0);
        atomic_set(&sdev->cancel_req, 0);
        sdev->csum_size = btrfs_super_csum_size(&fs_info->super_copy);
        INIT_LIST_HEAD(&sdev->csum_list);

        spin_lock_init(&sdev->list_lock);
        spin_lock_init(&sdev->stat_lock);
        init_waitqueue_head(&sdev->list_wait);
        return sdev;

nomem:
        scrub_free_dev(sdev);
        return ERR_PTR(-ENOMEM);
}

/*
 * scrub_recheck_error gets called when either verification of the page
 * failed or the bio failed to read, e.g. with EIO. In the latter case,
 * recheck_error gets called for every page in the bio, even though only
 * one may be bad.
 */
static void scrub_recheck_error(struct scrub_bio *sbio, int ix)
{
        if (sbio->err) {
                if (scrub_fixup_io(READ, sbio->sdev->dev->bdev,
                                   (sbio->physical + ix * PAGE_SIZE) >> 9,
                                   sbio->bio->bi_io_vec[ix].bv_page) == 0) {
                        if (scrub_fixup_check(sbio, ix) == 0)
                                return;
                }
        }

        scrub_fixup(sbio, ix);
}

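/*
 * re-verify the checksum of the page at index @ix, which has just been
 * re-read. Returns 0 if the page now passes verification.
 */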
static int scrub_fixup_check(struct scrub_bio *sbio, int ix)
{
        int ret = 1;
        struct page *page;
        void *buffer;
        u64 flags = sbio->spag[ix].flags;

        page = sbio->bio->bi_io_vec[ix].bv_page;
        buffer = kmap_atomic(page, KM_USER0);
        if (flags & BTRFS_EXTENT_FLAG_DATA) {
                ret = scrub_checksum_data(sbio->sdev,
                                          sbio->spag + ix, buffer);
        } else if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
                ret = scrub_checksum_tree_block(sbio->sdev,
                                                sbio->spag + ix,
                                                sbio->logical + ix * PAGE_SIZE,
                                                buffer);
        } else {
                WARN_ON(1);
        }
        kunmap_atomic(buffer, KM_USER0);

        return ret;
}

static void scrub_fixup_end_io(struct bio *bio, int err)
{
        complete((struct completion *)bio->bi_private);
}

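/*
 * try to repair the page at index @ix: read the other mirrors in turn
 * until one passes verification, then write the good copy back to the
 * device being scrubbed. Updates the corrected/uncorrectable counters.
 */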
static void scrub_fixup(struct scrub_bio *sbio, int ix)
{
        struct scrub_dev *sdev = sbio->sdev;
        struct btrfs_fs_info *fs_info = sdev->dev->dev_root->fs_info;
        struct btrfs_mapping_tree *map_tree = &fs_info->mapping_tree;
        struct btrfs_multi_bio *multi = NULL;
        u64 logical = sbio->logical + ix * PAGE_SIZE;
        u64 length;
        int i;
        int ret;
        DECLARE_COMPLETION_ONSTACK(complete);

        if ((sbio->spag[ix].flags & BTRFS_EXTENT_FLAG_DATA) &&
            (sbio->spag[ix].have_csum == 0)) {
                /*
                 * nodatasum, don't try to fix anything
                 * FIXME: we can do better, open the inode and trigger a
                 * writeback
                 */
                goto uncorrectable;
        }

        length = PAGE_SIZE;
        ret = btrfs_map_block(map_tree, REQ_WRITE, logical, &length,
                              &multi, 0);
        if (ret || !multi || length < PAGE_SIZE) {
                printk(KERN_ERR
                       "scrub_fixup: btrfs_map_block failed us for %llu\n",
                       (unsigned long long)logical);
                WARN_ON(1);
                return;
        }

        if (multi->num_stripes == 1)
                /* there aren't any replicas */
                goto uncorrectable;

        /*
         * first find a good copy
         */
        for (i = 0; i < multi->num_stripes; ++i) {
                if (i == sbio->spag[ix].mirror_num)
                        continue;

                if (scrub_fixup_io(READ, multi->stripes[i].dev->bdev,
                                   multi->stripes[i].physical >> 9,
                                   sbio->bio->bi_io_vec[ix].bv_page)) {
                        /* I/O-error, this is not a good copy */
                        continue;
                }

                if (scrub_fixup_check(sbio, ix) == 0)
                        break;
        }
        if (i == multi->num_stripes)
                goto uncorrectable;

        /*
         * bi_io_vec[ix].bv_page now contains good data, write it back
         */
        if (scrub_fixup_io(WRITE, sdev->dev->bdev,
                           (sbio->physical + ix * PAGE_SIZE) >> 9,
                           sbio->bio->bi_io_vec[ix].bv_page)) {
                /* I/O-error, writeback failed, give up */
                goto uncorrectable;
        }

        kfree(multi);
        spin_lock(&sdev->stat_lock);
        ++sdev->stat.corrected_errors;
        spin_unlock(&sdev->stat_lock);

        if (printk_ratelimit())
                printk(KERN_ERR "btrfs: fixed up at %llu\n",
                       (unsigned long long)logical);
        return;

uncorrectable:
        kfree(multi);
        spin_lock(&sdev->stat_lock);
        ++sdev->stat.uncorrectable_errors;
        spin_unlock(&sdev->stat_lock);

        if (printk_ratelimit())
                printk(KERN_ERR "btrfs: unable to fixup at %llu\n",
                       (unsigned long long)logical);
}

static int scrub_fixup_io(int rw, struct block_device *bdev, sector_t sector,
                          struct page *page)
{
        struct bio *bio = NULL;
        int ret;
        DECLARE_COMPLETION_ONSTACK(complete);

        /* we are going to wait on this IO */
        rw |= REQ_SYNC | REQ_UNPLUG;

        bio = bio_alloc(GFP_NOFS, 1);
        bio->bi_bdev = bdev;
        bio->bi_sector = sector;
        bio_add_page(bio, page, PAGE_SIZE, 0);
        bio->bi_end_io = scrub_fixup_end_io;
        bio->bi_private = &complete;
        submit_bio(rw, bio);

        wait_for_completion(&complete);

        ret = !test_bit(BIO_UPTODATE, &bio->bi_flags);
        bio_put(bio);
        return ret;
}

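/* bio completion: remember the error and defer all checking to a worker */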
static void scrub_bio_end_io(struct bio *bio, int err)
{
        struct scrub_bio *sbio = bio->bi_private;
        struct scrub_dev *sdev = sbio->sdev;
        struct btrfs_fs_info *fs_info = sdev->dev->dev_root->fs_info;

        sbio->err = err;

        btrfs_queue_worker(&fs_info->scrub_workers, &sbio->work);
}

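/*
 * worker function: verify every page of a completed scrub_bio. If the bio
 * itself failed, recheck each page individually and reset the bio for
 * reuse. Finally the bio is returned to the free list.
 */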
static void scrub_checksum(struct btrfs_work *work)
{
        struct scrub_bio *sbio = container_of(work, struct scrub_bio, work);
        struct scrub_dev *sdev = sbio->sdev;
        struct page *page;
        void *buffer;
        int i;
        u64 flags;
        u64 logical;
        int ret;

        if (sbio->err) {
                for (i = 0; i < sbio->count; ++i)
                        scrub_recheck_error(sbio, i);

                sbio->bio->bi_flags &= ~(BIO_POOL_MASK - 1);
                sbio->bio->bi_flags |= 1 << BIO_UPTODATE;
                sbio->bio->bi_phys_segments = 0;
                sbio->bio->bi_idx = 0;

                for (i = 0; i < sbio->count; i++) {
                        struct bio_vec *bi;
                        bi = &sbio->bio->bi_io_vec[i];
                        bi->bv_offset = 0;
                        bi->bv_len = PAGE_SIZE;
                }

                spin_lock(&sdev->stat_lock);
                ++sdev->stat.read_errors;
                spin_unlock(&sdev->stat_lock);
                goto out;
        }
        for (i = 0; i < sbio->count; ++i) {
                page = sbio->bio->bi_io_vec[i].bv_page;
                buffer = kmap_atomic(page, KM_USER0);
                flags = sbio->spag[i].flags;
                logical = sbio->logical + i * PAGE_SIZE;
                ret = 0;
                if (flags & BTRFS_EXTENT_FLAG_DATA) {
                        ret = scrub_checksum_data(sdev, sbio->spag + i, buffer);
                } else if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
                        ret = scrub_checksum_tree_block(sdev, sbio->spag + i,
                                                        logical, buffer);
                } else if (flags & BTRFS_EXTENT_FLAG_SUPER) {
                        BUG_ON(i);
                        (void)scrub_checksum_super(sbio, buffer);
                } else {
                        WARN_ON(1);
                }
                kunmap_atomic(buffer, KM_USER0);
                if (ret)
                        scrub_recheck_error(sbio, i);
        }

out:
        spin_lock(&sdev->list_lock);
        sbio->next_free = sdev->first_free;
        sdev->first_free = sbio->index;
        spin_unlock(&sdev->list_lock);
        atomic_dec(&sdev->in_flight);
        wake_up(&sdev->list_wait);
}

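/* verify the data checksum of one page; returns 1 on mismatch, 0 otherwise */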
static int scrub_checksum_data(struct scrub_dev *sdev,
                               struct scrub_page *spag, void *buffer)
{
        u8 csum[BTRFS_CSUM_SIZE];
        u32 crc = ~(u32)0;
        int fail = 0;
        struct btrfs_root *root = sdev->dev->dev_root;

        if (!spag->have_csum)
                return 0;

        crc = btrfs_csum_data(root, buffer, crc, PAGE_SIZE);
        btrfs_csum_final(crc, csum);
        if (memcmp(csum, spag->csum, sdev->csum_size))
                fail = 1;

        spin_lock(&sdev->stat_lock);
        ++sdev->stat.data_extents_scrubbed;
        sdev->stat.data_bytes_scrubbed += PAGE_SIZE;
        if (fail)
                ++sdev->stat.csum_errors;
        spin_unlock(&sdev->stat_lock);

        return fail;
}

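/*
 * verify a tree block: check bytenr, generation, fsid and chunk tree uuid
 * in the header plus the checksum. Returns nonzero on any mismatch.
 */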
static int scrub_checksum_tree_block(struct scrub_dev *sdev,
                                     struct scrub_page *spag, u64 logical,
                                     void *buffer)
{
        struct btrfs_header *h;
        struct btrfs_root *root = sdev->dev->dev_root;
        struct btrfs_fs_info *fs_info = root->fs_info;
        u8 csum[BTRFS_CSUM_SIZE];
        u32 crc = ~(u32)0;
        int fail = 0;
        int crc_fail = 0;

        /*
         * we don't use the getter functions here, as we
         * a) don't have an extent buffer and
         * b) the page is already kmapped
         */
        h = (struct btrfs_header *)buffer;

        if (logical != le64_to_cpu(h->bytenr))
                ++fail;

        if (spag->generation != le64_to_cpu(h->generation))
                ++fail;

        if (memcmp(h->fsid, fs_info->fsid, BTRFS_UUID_SIZE))
                ++fail;

        if (memcmp(h->chunk_tree_uuid, fs_info->chunk_tree_uuid,
                   BTRFS_UUID_SIZE))
                ++fail;

        crc = btrfs_csum_data(root, buffer + BTRFS_CSUM_SIZE, crc,
                              PAGE_SIZE - BTRFS_CSUM_SIZE);
        btrfs_csum_final(crc, csum);
        if (memcmp(csum, h->csum, sdev->csum_size))
                ++crc_fail;

        spin_lock(&sdev->stat_lock);
        ++sdev->stat.tree_extents_scrubbed;
        sdev->stat.tree_bytes_scrubbed += PAGE_SIZE;
        if (crc_fail)
                ++sdev->stat.csum_errors;
        if (fail)
                ++sdev->stat.verify_errors;
        spin_unlock(&sdev->stat_lock);

        return fail || crc_fail;
}

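/*
 * verify a super block copy: check bytenr, generation and fsid plus the
 * checksum. Errors are only counted; the super blocks get rewritten with
 * the next transaction commit anyway.
 */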
static int scrub_checksum_super(struct scrub_bio *sbio, void *buffer)
{
        struct btrfs_super_block *s;
        u64 logical;
        struct scrub_dev *sdev = sbio->sdev;
        struct btrfs_root *root = sdev->dev->dev_root;
        struct btrfs_fs_info *fs_info = root->fs_info;
        u8 csum[BTRFS_CSUM_SIZE];
        u32 crc = ~(u32)0;
        int fail = 0;

        s = (struct btrfs_super_block *)buffer;
        logical = sbio->logical;

        if (logical != le64_to_cpu(s->bytenr))
                ++fail;

        if (sbio->spag[0].generation != le64_to_cpu(s->generation))
                ++fail;

        if (memcmp(s->fsid, fs_info->fsid, BTRFS_UUID_SIZE))
                ++fail;

        crc = btrfs_csum_data(root, buffer + BTRFS_CSUM_SIZE, crc,
                              PAGE_SIZE - BTRFS_CSUM_SIZE);
        btrfs_csum_final(crc, csum);
        if (memcmp(csum, s->csum, sbio->sdev->csum_size))
                ++fail;

        if (fail) {
                /*
                 * if we find an error in a super block, we just report it.
                 * They will get written with the next transaction commit
                 * anyway
                 */
                spin_lock(&sdev->stat_lock);
                ++sdev->stat.super_errors;
                spin_unlock(&sdev->stat_lock);
        }

        return fail;
}

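/* submit the bio that is currently being filled, if there is one */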
static int scrub_submit(struct scrub_dev *sdev)
{
        struct scrub_bio *sbio;

        if (sdev->curr == -1)
                return 0;

        sbio = sdev->bios[sdev->curr];

        sbio->bio->bi_sector = sbio->physical >> 9;
        sbio->bio->bi_size = sbio->count * PAGE_SIZE;
        sbio->bio->bi_next = NULL;
        sbio->bio->bi_flags |= 1 << BIO_UPTODATE;
        sbio->bio->bi_comp_cpu = -1;
        sbio->bio->bi_bdev = sdev->dev->bdev;
        sbio->err = 0;
        sdev->curr = -1;
        atomic_inc(&sdev->in_flight);

        submit_bio(0, sbio->bio);

        return 0;
}

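/*
 * queue one page worth of data at @logical/@physical into the current
 * scrub_bio, grabbing a fresh bio if necessary. The bio is submitted when
 * it is full, when the next page would not be physically contiguous, or
 * when @force is set.
 */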
static int scrub_page(struct scrub_dev *sdev, u64 logical, u64 len,
                      u64 physical, u64 flags, u64 gen, u64 mirror_num,
                      u8 *csum, int force)
{
        struct scrub_bio *sbio;

again:
        /*
         * grab a fresh bio or wait for one to become available
         */
        while (sdev->curr == -1) {
                spin_lock(&sdev->list_lock);
                sdev->curr = sdev->first_free;
                if (sdev->curr != -1) {
                        sdev->first_free = sdev->bios[sdev->curr]->next_free;
                        sdev->bios[sdev->curr]->next_free = -1;
                        sdev->bios[sdev->curr]->count = 0;
                        spin_unlock(&sdev->list_lock);
                } else {
                        spin_unlock(&sdev->list_lock);
                        wait_event(sdev->list_wait, sdev->first_free != -1);
                }
        }
        sbio = sdev->bios[sdev->curr];
        if (sbio->count == 0) {
                sbio->physical = physical;
                sbio->logical = logical;
        } else if (sbio->physical + sbio->count * PAGE_SIZE != physical) {
                scrub_submit(sdev);
                goto again;
        }
        sbio->spag[sbio->count].flags = flags;
        sbio->spag[sbio->count].generation = gen;
        sbio->spag[sbio->count].have_csum = 0;
        sbio->spag[sbio->count].mirror_num = mirror_num;
        if (csum) {
                sbio->spag[sbio->count].have_csum = 1;
                memcpy(sbio->spag[sbio->count].csum, csum, sdev->csum_size);
        }
        ++sbio->count;
        if (sbio->count == SCRUB_PAGES_PER_BIO || force)
                scrub_submit(sdev);

        return 0;
}

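/*
 * look up the csum for @logical in the pre-collected csum_list, dropping
 * entries that lie entirely before it. Copies the csum to @csum and
 * returns 1 on success, 0 if no csum is known.
 */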
static int scrub_find_csum(struct scrub_dev *sdev, u64 logical, u64 len,
                           u8 *csum)
{
        struct btrfs_ordered_sum *sum = NULL;
        int ret = 0;
        unsigned long i;
        unsigned long num_sectors;
        u32 sectorsize = sdev->dev->dev_root->sectorsize;

        while (!list_empty(&sdev->csum_list)) {
                sum = list_first_entry(&sdev->csum_list,
                                       struct btrfs_ordered_sum, list);
                if (sum->bytenr > logical)
                        return 0;
                if (sum->bytenr + sum->len > logical)
                        break;

                ++sdev->stat.csum_discards;
                list_del(&sum->list);
                kfree(sum);
                sum = NULL;
        }
        if (!sum)
                return 0;

        num_sectors = sum->len / sectorsize;
        for (i = 0; i < num_sectors; ++i) {
                if (sum->sums[i].bytenr == logical) {
                        memcpy(csum, &sum->sums[i].sum, sdev->csum_size);
                        ret = 1;
                        break;
                }
        }
        if (ret && i == num_sectors - 1) {
                list_del(&sum->list);
                kfree(sum);
        }
        return ret;
}

/* scrub_extent tries to collect up to 64 kB for each bio */
static int scrub_extent(struct scrub_dev *sdev, u64 logical, u64 len,
                        u64 physical, u64 flags, u64 gen, u64 mirror_num)
{
        int ret;
        u8 csum[BTRFS_CSUM_SIZE];

        while (len) {
                u64 l = min_t(u64, len, PAGE_SIZE);
                int have_csum = 0;

                if (flags & BTRFS_EXTENT_FLAG_DATA) {
                        /* push csums to sbio */
                        have_csum = scrub_find_csum(sdev, logical, l, csum);
                        if (have_csum == 0)
                                ++sdev->stat.no_csum;
                }
                ret = scrub_page(sdev, logical, l, physical, flags, gen,
                                 mirror_num, have_csum ? csum : NULL, 0);
                if (ret)
                        return ret;
                len -= l;
                logical += l;
                physical += l;
        }
        return 0;
}

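/*
 * scrub one stripe of a chunk: prefetch the extent tree leaves, collect
 * the data csums for the stripe, then walk all extent items and scrub
 * them, honoring pause and cancel requests along the way.
 */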
static noinline_for_stack int scrub_stripe(struct scrub_dev *sdev,
        struct map_lookup *map, int num, u64 base, u64 length)
{
        struct btrfs_path *path;
        struct btrfs_fs_info *fs_info = sdev->dev->dev_root->fs_info;
        struct btrfs_root *root = fs_info->extent_root;
        struct btrfs_root *csum_root = fs_info->csum_root;
        struct btrfs_extent_item *extent;
        u64 flags;
        int ret;
        int slot;
        int i;
        u64 nstripes;
        int start_stripe;
        struct extent_buffer *l;
        struct btrfs_key key;
        u64 physical;
        u64 logical;
        u64 generation;
        u64 mirror_num;

        u64 increment = map->stripe_len;
        u64 offset;

        nstripes = length;
        offset = 0;
        do_div(nstripes, map->stripe_len);
        if (map->type & BTRFS_BLOCK_GROUP_RAID0) {
                offset = map->stripe_len * num;
                increment = map->stripe_len * map->num_stripes;
                mirror_num = 0;
        } else if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
                int factor = map->num_stripes / map->sub_stripes;
                offset = map->stripe_len * (num / map->sub_stripes);
                increment = map->stripe_len * factor;
                mirror_num = num % map->sub_stripes;
        } else if (map->type & BTRFS_BLOCK_GROUP_RAID1) {
                increment = map->stripe_len;
                mirror_num = num % map->num_stripes;
        } else if (map->type & BTRFS_BLOCK_GROUP_DUP) {
                increment = map->stripe_len;
                mirror_num = num % map->num_stripes;
        } else {
                increment = map->stripe_len;
                mirror_num = 0;
        }
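        /*
         * Example: for a RAID10 chunk with num_stripes=4, sub_stripes=2
         * and a 64k stripe_len, scrubbing stripe num=3 yields factor=2,
         * offset=64k, increment=128k and mirror_num=1.
         */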

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;

        path->reada = 2;
        path->search_commit_root = 1;
        path->skip_locking = 1;

        /*
         * find all extents for each stripe and just read them to get
         * them into the page cache
         * FIXME: we can do better and build a more intelligent prefetching
         */
        logical = base + offset;
        physical = map->stripes[num].physical;
        ret = 0;
        for (i = 0; i < nstripes; ++i) {
                key.objectid = logical;
                key.type = BTRFS_EXTENT_ITEM_KEY;
                key.offset = (u64)0;

                ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
                if (ret < 0)
                        goto out;

                l = path->nodes[0];
                slot = path->slots[0];
                btrfs_item_key_to_cpu(l, &key, slot);
                if (key.objectid != logical) {
                        ret = btrfs_previous_item(root, path, 0,
                                                  BTRFS_EXTENT_ITEM_KEY);
                        if (ret < 0)
                                goto out;
                }

                while (1) {
                        l = path->nodes[0];
                        slot = path->slots[0];
                        if (slot >= btrfs_header_nritems(l)) {
                                ret = btrfs_next_leaf(root, path);
                                if (ret == 0)
                                        continue;
                                if (ret < 0)
                                        goto out;

                                break;
                        }
                        btrfs_item_key_to_cpu(l, &key, slot);

                        if (key.objectid >= logical + map->stripe_len)
                                break;

                        path->slots[0]++;
                }
                btrfs_release_path(root, path);
                logical += increment;
                physical += map->stripe_len;
                cond_resched();
        }

        /*
         * collect all data csums for the stripe to avoid seeking during
         * the scrub. This might currently (crc32) end up being about 1MB
         */
        start_stripe = 0;
again:
        logical = base + offset + start_stripe * increment;
        for (i = start_stripe; i < nstripes; ++i) {
                ret = btrfs_lookup_csums_range(csum_root, logical,
                                               logical + map->stripe_len - 1,
                                               &sdev->csum_list, 1);
                if (ret)
                        goto out;

                logical += increment;
                cond_resched();
        }
        /*
         * now find all extents for each stripe and scrub them
         */
        logical = base + offset + start_stripe * increment;
        physical = map->stripes[num].physical + start_stripe * map->stripe_len;
        ret = 0;
        for (i = start_stripe; i < nstripes; ++i) {
                /*
                 * canceled?
                 */
                if (atomic_read(&fs_info->scrub_cancel_req) ||
                    atomic_read(&sdev->cancel_req)) {
                        ret = -ECANCELED;
                        goto out;
                }
                /*
                 * check to see if we have to pause
                 */
                if (atomic_read(&fs_info->scrub_pause_req)) {
                        /* push queued extents */
                        scrub_submit(sdev);
                        wait_event(sdev->list_wait,
                                   atomic_read(&sdev->in_flight) == 0);
                        atomic_inc(&fs_info->scrubs_paused);
                        wake_up(&fs_info->scrub_pause_wait);
                        mutex_lock(&fs_info->scrub_lock);
                        while (atomic_read(&fs_info->scrub_pause_req)) {
                                mutex_unlock(&fs_info->scrub_lock);
                                wait_event(fs_info->scrub_pause_wait,
                                   atomic_read(&fs_info->scrub_pause_req) == 0);
                                mutex_lock(&fs_info->scrub_lock);
                        }
                        atomic_dec(&fs_info->scrubs_paused);
                        mutex_unlock(&fs_info->scrub_lock);
                        wake_up(&fs_info->scrub_pause_wait);
                        scrub_free_csums(sdev);
                        start_stripe = i;
                        goto again;
                }

                key.objectid = logical;
                key.type = BTRFS_EXTENT_ITEM_KEY;
                key.offset = (u64)0;

                ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
                if (ret < 0)
                        goto out;

                l = path->nodes[0];
                slot = path->slots[0];
                btrfs_item_key_to_cpu(l, &key, slot);
                if (key.objectid != logical) {
                        ret = btrfs_previous_item(root, path, 0,
                                                  BTRFS_EXTENT_ITEM_KEY);
                        if (ret < 0)
                                goto out;
                }

                while (1) {
                        l = path->nodes[0];
                        slot = path->slots[0];
                        if (slot >= btrfs_header_nritems(l)) {
                                ret = btrfs_next_leaf(root, path);
                                if (ret == 0)
                                        continue;
                                if (ret < 0)
                                        goto out;

                                break;
                        }
                        btrfs_item_key_to_cpu(l, &key, slot);

                        if (key.objectid + key.offset <= logical)
                                goto next;

                        if (key.objectid >= logical + map->stripe_len)
                                break;

                        if (btrfs_key_type(&key) != BTRFS_EXTENT_ITEM_KEY)
                                goto next;

                        extent = btrfs_item_ptr(l, slot,
                                                struct btrfs_extent_item);
                        flags = btrfs_extent_flags(l, extent);
                        generation = btrfs_extent_generation(l, extent);

                        if (key.objectid < logical &&
                            (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK)) {
                                printk(KERN_ERR
                                       "btrfs scrub: tree block %llu spanning "
                                       "stripes, ignored. logical=%llu\n",
                                       (unsigned long long)key.objectid,
                                       (unsigned long long)logical);
                                goto next;
                        }

                        /*
                         * trim extent to this stripe
                         */
                        if (key.objectid < logical) {
                                key.offset -= logical - key.objectid;
                                key.objectid = logical;
                        }
                        if (key.objectid + key.offset >
                            logical + map->stripe_len) {
                                key.offset = logical + map->stripe_len -
                                             key.objectid;
                        }

                        ret = scrub_extent(sdev, key.objectid, key.offset,
                                           key.objectid - logical + physical,
                                           flags, generation, mirror_num);
                        if (ret)
                                goto out;

next:
                        path->slots[0]++;
                }
                btrfs_release_path(root, path);
                logical += increment;
                physical += map->stripe_len;
                spin_lock(&sdev->stat_lock);
                sdev->stat.last_physical = physical;
                spin_unlock(&sdev->stat_lock);
        }
        /* push queued extents */
        scrub_submit(sdev);

out:
        btrfs_free_path(path);
        return ret < 0 ? ret : 0;
}

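/*
 * scrub the portion of a chunk that resides on sdev's device by looking
 * up the chunk's mapping and scrubbing every stripe on that device
 */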
static noinline_for_stack int scrub_chunk(struct scrub_dev *sdev,
        u64 chunk_tree, u64 chunk_objectid, u64 chunk_offset, u64 length)
{
        struct btrfs_mapping_tree *map_tree =
                &sdev->dev->dev_root->fs_info->mapping_tree;
        struct map_lookup *map;
        struct extent_map *em;
        int i;
        int ret = -EINVAL;

        read_lock(&map_tree->map_tree.lock);
        em = lookup_extent_mapping(&map_tree->map_tree, chunk_offset, 1);
        read_unlock(&map_tree->map_tree.lock);

        if (!em)
                return -EINVAL;

        map = (struct map_lookup *)em->bdev;
        if (em->start != chunk_offset)
                goto out;

        if (em->len < length)
                goto out;

        for (i = 0; i < map->num_stripes; ++i) {
                if (map->stripes[i].dev == sdev->dev) {
                        ret = scrub_stripe(sdev, map, i, chunk_offset, length);
                        if (ret)
                                goto out;
                }
        }
out:
        free_extent_map(em);

        return ret;
}

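/*
 * walk all dev extents of the device between @start and @end, take a
 * reference on the corresponding block group and scrub each chunk
 */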
static noinline_for_stack
int scrub_enumerate_chunks(struct scrub_dev *sdev, u64 start, u64 end)
{
        struct btrfs_dev_extent *dev_extent = NULL;
        struct btrfs_path *path;
        struct btrfs_root *root = sdev->dev->dev_root;
        struct btrfs_fs_info *fs_info = root->fs_info;
        u64 length;
        u64 chunk_tree;
        u64 chunk_objectid;
        u64 chunk_offset;
        int ret;
        int slot;
        struct extent_buffer *l;
        struct btrfs_key key;
        struct btrfs_key found_key;
        struct btrfs_block_group_cache *cache;

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;

        path->reada = 2;
        path->search_commit_root = 1;
        path->skip_locking = 1;

        key.objectid = sdev->dev->devid;
        key.offset = 0ull;
        key.type = BTRFS_DEV_EXTENT_KEY;

        while (1) {
                ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
                if (ret < 0)
                        goto out;
                ret = 0;

                l = path->nodes[0];
                slot = path->slots[0];

                btrfs_item_key_to_cpu(l, &found_key, slot);

                if (found_key.objectid != sdev->dev->devid)
                        break;

                if (btrfs_key_type(&found_key) != BTRFS_DEV_EXTENT_KEY)
                        break;

                if (found_key.offset >= end)
                        break;

                if (found_key.offset < key.offset)
                        break;

                dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
                length = btrfs_dev_extent_length(l, dev_extent);

                if (found_key.offset + length <= start) {
                        key.offset = found_key.offset + length;
                        btrfs_release_path(root, path);
                        continue;
                }

                chunk_tree = btrfs_dev_extent_chunk_tree(l, dev_extent);
                chunk_objectid = btrfs_dev_extent_chunk_objectid(l, dev_extent);
                chunk_offset = btrfs_dev_extent_chunk_offset(l, dev_extent);

                /*
                 * get a reference on the corresponding block group to prevent
                 * the chunk from going away while we scrub it
                 */
                cache = btrfs_lookup_block_group(fs_info, chunk_offset);
                if (!cache) {
                        ret = -ENOENT;
                        goto out;
                }
                ret = scrub_chunk(sdev, chunk_tree, chunk_objectid,
                                  chunk_offset, length);
                btrfs_put_block_group(cache);
                if (ret)
                        break;

                key.offset = found_key.offset + length;
                btrfs_release_path(root, path);
        }

out:
        btrfs_free_path(path);
        return ret;
}

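/*
 * scrub all super block copies that fit on the device. The generation to
 * expect is taken from the last committed transaction.
 */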
static noinline_for_stack int scrub_supers(struct scrub_dev *sdev)
{
        int i;
        u64 bytenr;
        u64 gen;
        int ret;
        struct btrfs_device *device = sdev->dev;
        struct btrfs_root *root = device->dev_root;

        gen = root->fs_info->last_trans_committed;

        for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) {
                bytenr = btrfs_sb_offset(i);
                if (bytenr + BTRFS_SUPER_INFO_SIZE >= device->total_bytes)
                        break;

                ret = scrub_page(sdev, bytenr, PAGE_SIZE, bytenr,
                                 BTRFS_EXTENT_FLAG_SUPER, gen, i, NULL, 1);
                if (ret)
                        return ret;
        }
        wait_event(sdev->list_wait, atomic_read(&sdev->in_flight) == 0);

        return 0;
}

/*
 * get a reference count on fs_info->scrub_workers. start worker if necessary
 */
static noinline_for_stack int scrub_workers_get(struct btrfs_root *root)
{
        struct btrfs_fs_info *fs_info = root->fs_info;

        mutex_lock(&fs_info->scrub_lock);
        if (fs_info->scrub_workers_refcnt == 0)
                btrfs_start_workers(&fs_info->scrub_workers, 1);
        ++fs_info->scrub_workers_refcnt;
        mutex_unlock(&fs_info->scrub_lock);

        return 0;
}

static noinline_for_stack void scrub_workers_put(struct btrfs_root *root)
{
        struct btrfs_fs_info *fs_info = root->fs_info;

        mutex_lock(&fs_info->scrub_lock);
        if (--fs_info->scrub_workers_refcnt == 0)
                btrfs_stop_workers(&fs_info->scrub_workers);
        WARN_ON(fs_info->scrub_workers_refcnt < 0);
        mutex_unlock(&fs_info->scrub_lock);
}

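/*
 * scrub one device of the filesystem between @start and @end, reporting
 * progress through @progress. Only one scrub may run per device at a time.
 */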
int btrfs_scrub_dev(struct btrfs_root *root, u64 devid, u64 start, u64 end,
                    struct btrfs_scrub_progress *progress)
{
        struct scrub_dev *sdev;
        struct btrfs_fs_info *fs_info = root->fs_info;
        int ret;
        struct btrfs_device *dev;

        if (root->fs_info->closing)
                return -EINVAL;

        /*
         * check some assumptions
         */
        if (root->sectorsize != PAGE_SIZE ||
            root->sectorsize != root->leafsize ||
            root->sectorsize != root->nodesize) {
                printk(KERN_ERR "btrfs_scrub: size assumptions fail\n");
                return -EINVAL;
        }

        ret = scrub_workers_get(root);
        if (ret)
                return ret;

        mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
        dev = btrfs_find_device(root, devid, NULL, NULL);
        if (!dev || dev->missing) {
                mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
                scrub_workers_put(root);
                return -ENODEV;
        }
        mutex_lock(&fs_info->scrub_lock);

        if (!dev->in_fs_metadata) {
                mutex_unlock(&fs_info->scrub_lock);
                mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
                scrub_workers_put(root);
                return -ENODEV;
        }

        if (dev->scrub_device) {
                mutex_unlock(&fs_info->scrub_lock);
                mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
                scrub_workers_put(root);
                return -EINPROGRESS;
        }
        sdev = scrub_setup_dev(dev);
        if (IS_ERR(sdev)) {
                mutex_unlock(&fs_info->scrub_lock);
                mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
                scrub_workers_put(root);
                return PTR_ERR(sdev);
        }
        dev->scrub_device = sdev;

        atomic_inc(&fs_info->scrubs_running);
        mutex_unlock(&fs_info->scrub_lock);
        mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);

        down_read(&fs_info->scrub_super_lock);
        ret = scrub_supers(sdev);
        up_read(&fs_info->scrub_super_lock);

        if (!ret)
                ret = scrub_enumerate_chunks(sdev, start, end);

        wait_event(sdev->list_wait, atomic_read(&sdev->in_flight) == 0);

        atomic_dec(&fs_info->scrubs_running);
        wake_up(&fs_info->scrub_pause_wait);

        if (progress)
                memcpy(progress, &sdev->stat, sizeof(*progress));

        mutex_lock(&fs_info->scrub_lock);
        dev->scrub_device = NULL;
        mutex_unlock(&fs_info->scrub_lock);

        scrub_free_dev(sdev);
        scrub_workers_put(root);

        return ret;
}

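/* block until all running scrubs have acknowledged the pause request */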
int btrfs_scrub_pause(struct btrfs_root *root)
{
        struct btrfs_fs_info *fs_info = root->fs_info;

        mutex_lock(&fs_info->scrub_lock);
        atomic_inc(&fs_info->scrub_pause_req);
        while (atomic_read(&fs_info->scrubs_paused) !=
               atomic_read(&fs_info->scrubs_running)) {
                mutex_unlock(&fs_info->scrub_lock);
                wait_event(fs_info->scrub_pause_wait,
                           atomic_read(&fs_info->scrubs_paused) ==
                           atomic_read(&fs_info->scrubs_running));
                mutex_lock(&fs_info->scrub_lock);
        }
        mutex_unlock(&fs_info->scrub_lock);

        return 0;
}

int btrfs_scrub_continue(struct btrfs_root *root)
{
        struct btrfs_fs_info *fs_info = root->fs_info;

        atomic_dec(&fs_info->scrub_pause_req);
        wake_up(&fs_info->scrub_pause_wait);
        return 0;
}

int btrfs_scrub_pause_super(struct btrfs_root *root)
{
        down_write(&root->fs_info->scrub_super_lock);
        return 0;
}

int btrfs_scrub_continue_super(struct btrfs_root *root)
{
        up_write(&root->fs_info->scrub_super_lock);
        return 0;
}

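/* request cancellation of all running scrubs and wait for them to finish */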
int btrfs_scrub_cancel(struct btrfs_root *root)
{
        struct btrfs_fs_info *fs_info = root->fs_info;

        mutex_lock(&fs_info->scrub_lock);
        if (!atomic_read(&fs_info->scrubs_running)) {
                mutex_unlock(&fs_info->scrub_lock);
                return -ENOTCONN;
        }

        atomic_inc(&fs_info->scrub_cancel_req);
        while (atomic_read(&fs_info->scrubs_running)) {
                mutex_unlock(&fs_info->scrub_lock);
                wait_event(fs_info->scrub_pause_wait,
                           atomic_read(&fs_info->scrubs_running) == 0);
                mutex_lock(&fs_info->scrub_lock);
        }
        atomic_dec(&fs_info->scrub_cancel_req);
        mutex_unlock(&fs_info->scrub_lock);

        return 0;
}

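/* cancel the scrub running on @dev and wait until it has stopped */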
int btrfs_scrub_cancel_dev(struct btrfs_root *root, struct btrfs_device *dev)
{
        struct btrfs_fs_info *fs_info = root->fs_info;
        struct scrub_dev *sdev;

        mutex_lock(&fs_info->scrub_lock);
        sdev = dev->scrub_device;
        if (!sdev) {
                mutex_unlock(&fs_info->scrub_lock);
                return -ENOTCONN;
        }
        atomic_inc(&sdev->cancel_req);
        while (dev->scrub_device) {
                mutex_unlock(&fs_info->scrub_lock);
                wait_event(fs_info->scrub_pause_wait,
                           dev->scrub_device == NULL);
                mutex_lock(&fs_info->scrub_lock);
        }
        mutex_unlock(&fs_info->scrub_lock);

        return 0;
}
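
/* cancel the scrub running on the device with the given devid */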
int btrfs_scrub_cancel_devid(struct btrfs_root *root, u64 devid)
{
        struct btrfs_fs_info *fs_info = root->fs_info;
        struct btrfs_device *dev;
        int ret;

        /*
         * we have to hold the device_list_mutex here so the device
         * does not go away in cancel_dev. FIXME: find a better solution
         */
        mutex_lock(&fs_info->fs_devices->device_list_mutex);
        dev = btrfs_find_device(root, devid, NULL, NULL);
        if (!dev) {
                mutex_unlock(&fs_info->fs_devices->device_list_mutex);
                return -ENODEV;
        }
        ret = btrfs_scrub_cancel_dev(root, dev);
        mutex_unlock(&fs_info->fs_devices->device_list_mutex);

        return ret;
}

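/* copy the current progress counters of the scrub running on @devid */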
int btrfs_scrub_progress(struct btrfs_root *root, u64 devid,
                         struct btrfs_scrub_progress *progress)
{
        struct btrfs_device *dev;
        struct scrub_dev *sdev = NULL;

        mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
        dev = btrfs_find_device(root, devid, NULL, NULL);
        if (dev)
                sdev = dev->scrub_device;
        if (sdev)
                memcpy(progress, &sdev->stat, sizeof(*progress));
        mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);

        return dev ? (sdev ? 0 : -ENOTCONN) : -ENODEV;
}