| Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 1 | /* | 
| Stefan Behrens | b6bfebc | 2012-11-02 16:44:58 +0100 | [diff] [blame] | 2 |  * Copyright (C) 2011, 2012 STRATO.  All rights reserved. | 
| Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 3 |  * | 
 | 4 |  * This program is free software; you can redistribute it and/or | 
 | 5 |  * modify it under the terms of the GNU General Public | 
 | 6 |  * License v2 as published by the Free Software Foundation. | 
 | 7 |  * | 
 | 8 |  * This program is distributed in the hope that it will be useful, | 
 | 9 |  * but WITHOUT ANY WARRANTY; without even the implied warranty of | 
 | 10 |  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU | 
 | 11 |  * General Public License for more details. | 
 | 12 |  * | 
 | 13 |  * You should have received a copy of the GNU General Public | 
 | 14 |  * License along with this program; if not, write to the | 
 | 15 |  * Free Software Foundation, Inc., 59 Temple Place - Suite 330, | 
 | 16 |  * Boston, MA 02111-1307, USA. | 
 | 17 |  */ | 
 | 18 |  | 
| Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 19 | #include <linux/blkdev.h> | 
| Jan Schmidt | 558540c | 2011-06-13 19:59:12 +0200 | [diff] [blame] | 20 | #include <linux/ratelimit.h> | 
| Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 21 | #include "ctree.h" | 
 | 22 | #include "volumes.h" | 
 | 23 | #include "disk-io.h" | 
 | 24 | #include "ordered-data.h" | 
| Jan Schmidt | 0ef8e45 | 2011-06-13 20:04:15 +0200 | [diff] [blame] | 25 | #include "transaction.h" | 
| Jan Schmidt | 558540c | 2011-06-13 19:59:12 +0200 | [diff] [blame] | 26 | #include "backref.h" | 
| Jan Schmidt | 5da6fcb | 2011-08-04 18:11:04 +0200 | [diff] [blame] | 27 | #include "extent_io.h" | 
| Stefan Behrens | ff023aa | 2012-11-06 11:43:11 +0100 | [diff] [blame] | 28 | #include "dev-replace.h" | 
| Stefan Behrens | 21adbd5 | 2011-11-09 13:44:05 +0100 | [diff] [blame] | 29 | #include "check-integrity.h" | 
| Josef Bacik | 606686e | 2012-06-04 14:03:51 -0400 | [diff] [blame] | 30 | #include "rcu-string.h" | 
| David Woodhouse | 53b381b | 2013-01-29 18:40:14 -0500 | [diff] [blame] | 31 | #include "raid56.h" | 
| Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 32 |  | 
 | 33 | /* | 
 | 34 |  * This is only the first step towards a full-featured scrub. It reads all | 
 | 35 |  * extents and super blocks and verifies the checksums. In case a bad checksum | 
 | 36 |  * is found or an extent cannot be read, good data will be written back if | 
 | 37 |  * any can be found. | 
 | 38 |  * | 
 | 39 |  * Future enhancements: | 
| Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 40 |  *  - In case an unrepairable extent is encountered, track which files are | 
 | 41 |  *    affected and report them | 
| Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 42 |  *  - track and record media errors, throw out bad devices | 
| Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 43 |  *  - add a mode to also read unallocated space | 
| Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 44 |  */ | 
 | 45 |  | 
| Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 46 | struct scrub_block; | 
| Stefan Behrens | d9d181c | 2012-11-02 09:58:09 +0100 | [diff] [blame] | 47 | struct scrub_ctx; | 
| Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 48 |  | 
| Stefan Behrens | ff023aa | 2012-11-06 11:43:11 +0100 | [diff] [blame] | 49 | /* | 
 | 50 |  * The following three values only influence performance. | 
 | 51 |  * The last one configures the number of parallel and outstanding I/O | 
 | 52 |  * operations. The first two values configure an upper limit for the number | 
 | 53 |  * of (dynamically allocated) pages that are added to a bio. | 
 | 54 |  */ | 
 | 55 | #define SCRUB_PAGES_PER_RD_BIO	32	/* 128k per bio */ | 
 | 56 | #define SCRUB_PAGES_PER_WR_BIO	32	/* 128k per bio */ | 
 | 57 | #define SCRUB_BIOS_PER_SCTX	64	/* 8MB per device in flight */ | 
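/*
 * A worked example of how these limits combine, assuming the common 4KiB
 * PAGE_SIZE (other page sizes scale these numbers accordingly):
 *
 *	32 pages per bio * 4KiB = 128KiB of payload per read/write bio
 *	64 bios per sctx * 128KiB = 8MiB of scrub I/O in flight per device
 */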
| Stefan Behrens | 7a9e998 | 2012-11-02 14:58:04 +0100 | [diff] [blame] | 58 |  | 
 | 59 | /* | 
 | 60 |  * the following value times PAGE_SIZE needs to be large enough to match the | 
 | 61 |  * largest node/leaf/sector size that shall be supported. | 
 | 62 |  * Values larger than BTRFS_STRIPE_LEN are not supported. | 
 | 63 |  */ | 
| Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 64 | #define SCRUB_MAX_PAGES_PER_BLOCK	16	/* 64k per node/leaf/sector */ | 
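/*
 * Again assuming 4KiB pages: 16 pages * 4KiB = 64KiB per block, large enough
 * for the biggest supported nodesize while not exceeding BTRFS_STRIPE_LEN
 * (64KiB at the time of writing).
 */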
| Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 65 |  | 
| Miao Xie | af8e2d1 | 2014-10-23 14:42:50 +0800 | [diff] [blame] | 66 | struct scrub_recover { | 
| Elena Reshetova | 6f61501 | 2017-03-03 10:55:21 +0200 | [diff] [blame] | 67 | 	refcount_t		refs; | 
| Miao Xie | af8e2d1 | 2014-10-23 14:42:50 +0800 | [diff] [blame] | 68 | 	struct btrfs_bio	*bbio; | 
| Miao Xie | af8e2d1 | 2014-10-23 14:42:50 +0800 | [diff] [blame] | 69 | 	u64			map_length; | 
 | 70 | }; | 
 | 71 |  | 
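/*
 * How the structures below relate (as can be seen from their definitions):
 * a scrub_page describes a single PAGE_SIZE unit at a given logical/physical
 * address; a scrub_block groups up to SCRUB_MAX_PAGES_PER_BLOCK such pages
 * that make up one tree block or data sector; a scrub_bio batches up to
 * SCRUB_PAGES_PER_RD_BIO/_WR_BIO pages, possibly from different blocks, into
 * one submitted bio.
 */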
| Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 72 | struct scrub_page { | 
| Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 73 | 	struct scrub_block	*sblock; | 
 | 74 | 	struct page		*page; | 
| Stefan Behrens | 442a4f6 | 2012-05-25 16:06:08 +0200 | [diff] [blame] | 75 | 	struct btrfs_device	*dev; | 
| Miao Xie | 5a6ac9e | 2014-11-06 17:20:58 +0800 | [diff] [blame] | 76 | 	struct list_head	list; | 
| Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 77 | 	u64			flags;  /* extent flags */ | 
 | 78 | 	u64			generation; | 
| Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 79 | 	u64			logical; | 
 | 80 | 	u64			physical; | 
| Stefan Behrens | ff023aa | 2012-11-06 11:43:11 +0100 | [diff] [blame] | 81 | 	u64			physical_for_dev_replace; | 
| Zhao Lei | 5701934 | 2015-01-20 15:11:45 +0800 | [diff] [blame] | 82 | 	atomic_t		refs; | 
| Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 83 | 	struct { | 
 | 84 | 		unsigned int	mirror_num:8; | 
 | 85 | 		unsigned int	have_csum:1; | 
 | 86 | 		unsigned int	io_error:1; | 
 | 87 | 	}; | 
| Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 88 | 	u8			csum[BTRFS_CSUM_SIZE]; | 
| Miao Xie | af8e2d1 | 2014-10-23 14:42:50 +0800 | [diff] [blame] | 89 |  | 
 | 90 | 	struct scrub_recover	*recover; | 
| Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 91 | }; | 
 | 92 |  | 
 | 93 | struct scrub_bio { | 
 | 94 | 	int			index; | 
| Stefan Behrens | d9d181c | 2012-11-02 09:58:09 +0100 | [diff] [blame] | 95 | 	struct scrub_ctx	*sctx; | 
| Stefan Behrens | a36cf8b | 2012-11-02 13:26:57 +0100 | [diff] [blame] | 96 | 	struct btrfs_device	*dev; | 
| Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 97 | 	struct bio		*bio; | 
 | 98 | 	int			err; | 
 | 99 | 	u64			logical; | 
 | 100 | 	u64			physical; | 
| Stefan Behrens | ff023aa | 2012-11-06 11:43:11 +0100 | [diff] [blame] | 101 | #if SCRUB_PAGES_PER_WR_BIO >= SCRUB_PAGES_PER_RD_BIO | 
 | 102 | 	struct scrub_page	*pagev[SCRUB_PAGES_PER_WR_BIO]; | 
 | 103 | #else | 
 | 104 | 	struct scrub_page	*pagev[SCRUB_PAGES_PER_RD_BIO]; | 
 | 105 | #endif | 
| Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 106 | 	int			page_count; | 
| Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 107 | 	int			next_free; | 
 | 108 | 	struct btrfs_work	work; | 
 | 109 | }; | 
 | 110 |  | 
| Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 111 | struct scrub_block { | 
| Stefan Behrens | 7a9e998 | 2012-11-02 14:58:04 +0100 | [diff] [blame] | 112 | 	struct scrub_page	*pagev[SCRUB_MAX_PAGES_PER_BLOCK]; | 
| Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 113 | 	int			page_count; | 
 | 114 | 	atomic_t		outstanding_pages; | 
| Elena Reshetova | 186debd | 2017-03-03 10:55:23 +0200 | [diff] [blame] | 115 | 	refcount_t		refs; /* free mem on transition to zero */ | 
| Stefan Behrens | d9d181c | 2012-11-02 09:58:09 +0100 | [diff] [blame] | 116 | 	struct scrub_ctx	*sctx; | 
| Miao Xie | 5a6ac9e | 2014-11-06 17:20:58 +0800 | [diff] [blame] | 117 | 	struct scrub_parity	*sparity; | 
| Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 118 | 	struct { | 
 | 119 | 		unsigned int	header_error:1; | 
 | 120 | 		unsigned int	checksum_error:1; | 
 | 121 | 		unsigned int	no_io_error_seen:1; | 
| Stefan Behrens | 442a4f6 | 2012-05-25 16:06:08 +0200 | [diff] [blame] | 122 | 		unsigned int	generation_error:1; /* also sets header_error */ | 
| Miao Xie | 5a6ac9e | 2014-11-06 17:20:58 +0800 | [diff] [blame] | 123 |  | 
 | 124 | 		/* The following flag is for the data used in the parity check, */ | 
 | 125 | 		/* i.e. data blocks that have a checksum */ | 
 | 126 | 		unsigned int	data_corrected:1; | 
| Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 127 | 	}; | 
| Omar Sandoval | 73ff61d | 2015-06-19 11:52:51 -0700 | [diff] [blame] | 128 | 	struct btrfs_work	work; | 
| Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 129 | }; | 
 | 130 |  | 
| Miao Xie | 5a6ac9e | 2014-11-06 17:20:58 +0800 | [diff] [blame] | 131 | /* Used for the chunks with parity stripes, such as RAID5/6 */ | 
 | 132 | struct scrub_parity { | 
 | 133 | 	struct scrub_ctx	*sctx; | 
 | 134 |  | 
 | 135 | 	struct btrfs_device	*scrub_dev; | 
 | 136 |  | 
 | 137 | 	u64			logic_start; | 
 | 138 |  | 
 | 139 | 	u64			logic_end; | 
 | 140 |  | 
 | 141 | 	int			nsectors; | 
 | 142 |  | 
| Liu Bo | 972d721 | 2017-04-03 13:45:33 -0700 | [diff] [blame] | 143 | 	u64			stripe_len; | 
| Miao Xie | 5a6ac9e | 2014-11-06 17:20:58 +0800 | [diff] [blame] | 144 |  | 
| Elena Reshetova | 78a7645 | 2017-03-03 10:55:24 +0200 | [diff] [blame] | 145 | 	refcount_t		refs; | 
| Miao Xie | 5a6ac9e | 2014-11-06 17:20:58 +0800 | [diff] [blame] | 146 |  | 
 | 147 | 	struct list_head	spages; | 
 | 148 |  | 
 | 149 | 	/* Work of parity check and repair */ | 
 | 150 | 	struct btrfs_work	work; | 
 | 151 |  | 
 | 152 | 	/* Mark the parity blocks which have data */ | 
 | 153 | 	unsigned long		*dbitmap; | 
 | 154 |  | 
 | 155 | 	/* | 
 | 156 | 	 * Mark the parity blocks which have data, but for which errors | 
 | 157 | 	 * happened when reading or checking that data | 
 | 158 | 	 */ | 
 | 159 | 	unsigned long		*ebitmap; | 
 | 160 |  | 
 | 161 | 	unsigned long		bitmap[0]; | 
 | 162 | }; | 
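/*
 * Note on the layout above: dbitmap and ebitmap are expected to point into
 * the trailing flexible bitmap[] area, each covering nsectors bits. This is
 * an assumption based on the declaration alone; the setup code lives
 * elsewhere in this file.
 */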
 | 163 |  | 
| Stefan Behrens | ff023aa | 2012-11-06 11:43:11 +0100 | [diff] [blame] | 164 | struct scrub_wr_ctx { | 
 | 165 | 	struct scrub_bio *wr_curr_bio; | 
 | 166 | 	struct btrfs_device *tgtdev; | 
 | 167 | 	int pages_per_wr_bio; /* <= SCRUB_PAGES_PER_WR_BIO */ | 
 | 168 | 	atomic_t flush_all_writes; | 
 | 169 | 	struct mutex wr_lock; | 
 | 170 | }; | 
 | 171 |  | 
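/*
 * Per scrub invocation context, one per scrubbed device. As scrub_setup_ctx()
 * and scrub_free_ctx() below show, the bios[] array is managed as a simple
 * free list chained through scrub_bio::next_free: first_free is the head of
 * the list and curr is the index of the bio currently being filled (-1 when
 * there is none).
 */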
| Stefan Behrens | d9d181c | 2012-11-02 09:58:09 +0100 | [diff] [blame] | 172 | struct scrub_ctx { | 
| Stefan Behrens | ff023aa | 2012-11-06 11:43:11 +0100 | [diff] [blame] | 173 | 	struct scrub_bio	*bios[SCRUB_BIOS_PER_SCTX]; | 
| Jeff Mahoney | fb45625 | 2016-06-22 18:54:56 -0400 | [diff] [blame] | 174 | 	struct btrfs_fs_info	*fs_info; | 
| Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 175 | 	int			first_free; | 
 | 176 | 	int			curr; | 
| Stefan Behrens | b6bfebc | 2012-11-02 16:44:58 +0100 | [diff] [blame] | 177 | 	atomic_t		bios_in_flight; | 
 | 178 | 	atomic_t		workers_pending; | 
| Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 179 | 	spinlock_t		list_lock; | 
 | 180 | 	wait_queue_head_t	list_wait; | 
 | 181 | 	u16			csum_size; | 
 | 182 | 	struct list_head	csum_list; | 
 | 183 | 	atomic_t		cancel_req; | 
| Arne Jansen | 8628764 | 2011-03-23 16:34:19 +0100 | [diff] [blame] | 184 | 	int			readonly; | 
| Stefan Behrens | ff023aa | 2012-11-06 11:43:11 +0100 | [diff] [blame] | 185 | 	int			pages_per_rd_bio; | 
| Stefan Behrens | 63a212a | 2012-11-05 18:29:28 +0100 | [diff] [blame] | 186 |  | 
 | 187 | 	int			is_dev_replace; | 
| Stefan Behrens | ff023aa | 2012-11-06 11:43:11 +0100 | [diff] [blame] | 188 | 	struct scrub_wr_ctx	wr_ctx; | 
| Stefan Behrens | 63a212a | 2012-11-05 18:29:28 +0100 | [diff] [blame] | 189 |  | 
| Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 190 | 	/* | 
 | 191 | 	 * statistics | 
 | 192 | 	 */ | 
 | 193 | 	struct btrfs_scrub_progress stat; | 
 | 194 | 	spinlock_t		stat_lock; | 
| Filipe Manana | f55985f | 2015-02-09 21:14:24 +0000 | [diff] [blame] | 195 |  | 
 | 196 | 	/* | 
 | 197 | 	 * Use a ref counter to avoid use-after-free issues. Scrub workers | 
 | 198 | 	 * decrement bios_in_flight and workers_pending and then do a wakeup | 
 | 199 | 	 * on the list_wait wait queue. We must ensure the main scrub task | 
 | 200 | 	 * doesn't free the scrub context before or while the workers are | 
 | 201 | 	 * doing the wakeup() call. | 
 | 202 | 	 */ | 
| Elena Reshetova | 99f4cdb | 2017-03-03 10:55:25 +0200 | [diff] [blame] | 203 | 	refcount_t              refs; | 
| Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 204 | }; | 
 | 205 |  | 
| Jan Schmidt | 0ef8e45 | 2011-06-13 20:04:15 +0200 | [diff] [blame] | 206 | struct scrub_fixup_nodatasum { | 
| Stefan Behrens | d9d181c | 2012-11-02 09:58:09 +0100 | [diff] [blame] | 207 | 	struct scrub_ctx	*sctx; | 
| Stefan Behrens | a36cf8b | 2012-11-02 13:26:57 +0100 | [diff] [blame] | 208 | 	struct btrfs_device	*dev; | 
| Jan Schmidt | 0ef8e45 | 2011-06-13 20:04:15 +0200 | [diff] [blame] | 209 | 	u64			logical; | 
 | 210 | 	struct btrfs_root	*root; | 
 | 211 | 	struct btrfs_work	work; | 
 | 212 | 	int			mirror_num; | 
 | 213 | }; | 
 | 214 |  | 
| Josef Bacik | 652f25a | 2013-09-12 16:58:28 -0400 | [diff] [blame] | 215 | struct scrub_nocow_inode { | 
 | 216 | 	u64			inum; | 
 | 217 | 	u64			offset; | 
 | 218 | 	u64			root; | 
 | 219 | 	struct list_head	list; | 
 | 220 | }; | 
 | 221 |  | 
| Stefan Behrens | ff023aa | 2012-11-06 11:43:11 +0100 | [diff] [blame] | 222 | struct scrub_copy_nocow_ctx { | 
 | 223 | 	struct scrub_ctx	*sctx; | 
 | 224 | 	u64			logical; | 
 | 225 | 	u64			len; | 
 | 226 | 	int			mirror_num; | 
 | 227 | 	u64			physical_for_dev_replace; | 
| Josef Bacik | 652f25a | 2013-09-12 16:58:28 -0400 | [diff] [blame] | 228 | 	struct list_head	inodes; | 
| Stefan Behrens | ff023aa | 2012-11-06 11:43:11 +0100 | [diff] [blame] | 229 | 	struct btrfs_work	work; | 
 | 230 | }; | 
 | 231 |  | 
| Jan Schmidt | 558540c | 2011-06-13 19:59:12 +0200 | [diff] [blame] | 232 | struct scrub_warning { | 
 | 233 | 	struct btrfs_path	*path; | 
 | 234 | 	u64			extent_item_size; | 
| Jan Schmidt | 558540c | 2011-06-13 19:59:12 +0200 | [diff] [blame] | 235 | 	const char		*errstr; | 
 | 236 | 	sector_t		sector; | 
 | 237 | 	u64			logical; | 
 | 238 | 	struct btrfs_device	*dev; | 
| Jan Schmidt | 558540c | 2011-06-13 19:59:12 +0200 | [diff] [blame] | 239 | }; | 
 | 240 |  | 
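/*
 * One node in the per block group rb-tree of full stripe locks (see
 * insert_full_stripe_lock() and lock_full_stripe() below): logical is the
 * start of the locked full stripe, refs counts the current lockers and
 * mutex provides the actual exclusion.
 */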
| Qu Wenruo | 0966a7b | 2017-04-14 08:35:54 +0800 | [diff] [blame] | 241 | struct full_stripe_lock { | 
 | 242 | 	struct rb_node node; | 
 | 243 | 	u64 logical; | 
 | 244 | 	u64 refs; | 
 | 245 | 	struct mutex mutex; | 
 | 246 | }; | 
 | 247 |  | 
| Stefan Behrens | b6bfebc | 2012-11-02 16:44:58 +0100 | [diff] [blame] | 248 | static void scrub_pending_bio_inc(struct scrub_ctx *sctx); | 
 | 249 | static void scrub_pending_bio_dec(struct scrub_ctx *sctx); | 
 | 250 | static void scrub_pending_trans_workers_inc(struct scrub_ctx *sctx); | 
 | 251 | static void scrub_pending_trans_workers_dec(struct scrub_ctx *sctx); | 
| Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 252 | static int scrub_handle_errored_block(struct scrub_block *sblock_to_check); | 
| Zhao Lei | be50a8d | 2015-01-20 15:11:42 +0800 | [diff] [blame] | 253 | static int scrub_setup_recheck_block(struct scrub_block *original_sblock, | 
| Stefan Behrens | ff023aa | 2012-11-06 11:43:11 +0100 | [diff] [blame] | 254 | 				     struct scrub_block *sblocks_for_recheck); | 
| Stefan Behrens | 34f5c8e | 2012-11-02 16:16:26 +0100 | [diff] [blame] | 255 | static void scrub_recheck_block(struct btrfs_fs_info *fs_info, | 
| Zhao Lei | affe4a5 | 2015-08-24 21:32:06 +0800 | [diff] [blame] | 256 | 				struct scrub_block *sblock, | 
 | 257 | 				int retry_failed_mirror); | 
| Zhao Lei | ba7cf98 | 2015-08-24 21:18:02 +0800 | [diff] [blame] | 258 | static void scrub_recheck_block_checksum(struct scrub_block *sblock); | 
| Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 259 | static int scrub_repair_block_from_good_copy(struct scrub_block *sblock_bad, | 
| Zhao Lei | 114ab50 | 2015-01-20 15:11:36 +0800 | [diff] [blame] | 260 | 					     struct scrub_block *sblock_good); | 
| Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 261 | static int scrub_repair_page_from_good_copy(struct scrub_block *sblock_bad, | 
 | 262 | 					    struct scrub_block *sblock_good, | 
 | 263 | 					    int page_num, int force_write); | 
| Stefan Behrens | ff023aa | 2012-11-06 11:43:11 +0100 | [diff] [blame] | 264 | static void scrub_write_block_to_dev_replace(struct scrub_block *sblock); | 
 | 265 | static int scrub_write_page_to_dev_replace(struct scrub_block *sblock, | 
 | 266 | 					   int page_num); | 
| Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 267 | static int scrub_checksum_data(struct scrub_block *sblock); | 
 | 268 | static int scrub_checksum_tree_block(struct scrub_block *sblock); | 
 | 269 | static int scrub_checksum_super(struct scrub_block *sblock); | 
 | 270 | static void scrub_block_get(struct scrub_block *sblock); | 
 | 271 | static void scrub_block_put(struct scrub_block *sblock); | 
| Stefan Behrens | 7a9e998 | 2012-11-02 14:58:04 +0100 | [diff] [blame] | 272 | static void scrub_page_get(struct scrub_page *spage); | 
 | 273 | static void scrub_page_put(struct scrub_page *spage); | 
| Miao Xie | 5a6ac9e | 2014-11-06 17:20:58 +0800 | [diff] [blame] | 274 | static void scrub_parity_get(struct scrub_parity *sparity); | 
 | 275 | static void scrub_parity_put(struct scrub_parity *sparity); | 
| Stefan Behrens | ff023aa | 2012-11-06 11:43:11 +0100 | [diff] [blame] | 276 | static int scrub_add_page_to_rd_bio(struct scrub_ctx *sctx, | 
 | 277 | 				    struct scrub_page *spage); | 
| Stefan Behrens | d9d181c | 2012-11-02 09:58:09 +0100 | [diff] [blame] | 278 | static int scrub_pages(struct scrub_ctx *sctx, u64 logical, u64 len, | 
| Stefan Behrens | a36cf8b | 2012-11-02 13:26:57 +0100 | [diff] [blame] | 279 | 		       u64 physical, struct btrfs_device *dev, u64 flags, | 
| Stefan Behrens | ff023aa | 2012-11-06 11:43:11 +0100 | [diff] [blame] | 280 | 		       u64 gen, int mirror_num, u8 *csum, int force, | 
 | 281 | 		       u64 physical_for_dev_replace); | 
| Christoph Hellwig | 4246a0b | 2015-07-20 15:29:37 +0200 | [diff] [blame] | 282 | static void scrub_bio_end_io(struct bio *bio); | 
| Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 283 | static void scrub_bio_end_io_worker(struct btrfs_work *work); | 
 | 284 | static void scrub_block_complete(struct scrub_block *sblock); | 
| Stefan Behrens | ff023aa | 2012-11-06 11:43:11 +0100 | [diff] [blame] | 285 | static void scrub_remap_extent(struct btrfs_fs_info *fs_info, | 
 | 286 | 			       u64 extent_logical, u64 extent_len, | 
 | 287 | 			       u64 *extent_physical, | 
 | 288 | 			       struct btrfs_device **extent_dev, | 
 | 289 | 			       int *extent_mirror_num); | 
| Stefan Behrens | ff023aa | 2012-11-06 11:43:11 +0100 | [diff] [blame] | 290 | static int scrub_add_page_to_wr_bio(struct scrub_ctx *sctx, | 
 | 291 | 				    struct scrub_page *spage); | 
 | 292 | static void scrub_wr_submit(struct scrub_ctx *sctx); | 
| Christoph Hellwig | 4246a0b | 2015-07-20 15:29:37 +0200 | [diff] [blame] | 293 | static void scrub_wr_bio_end_io(struct bio *bio); | 
| Stefan Behrens | ff023aa | 2012-11-06 11:43:11 +0100 | [diff] [blame] | 294 | static void scrub_wr_bio_end_io_worker(struct btrfs_work *work); | 
 | 295 | static int write_page_nocow(struct scrub_ctx *sctx, | 
 | 296 | 			    u64 physical_for_dev_replace, struct page *page); | 
 | 297 | static int copy_nocow_pages_for_inode(u64 inum, u64 offset, u64 root, | 
| Josef Bacik | 652f25a | 2013-09-12 16:58:28 -0400 | [diff] [blame] | 298 | 				      struct scrub_copy_nocow_ctx *ctx); | 
| Stefan Behrens | ff023aa | 2012-11-06 11:43:11 +0100 | [diff] [blame] | 299 | static int copy_nocow_pages(struct scrub_ctx *sctx, u64 logical, u64 len, | 
 | 300 | 			    int mirror_num, u64 physical_for_dev_replace); | 
 | 301 | static void copy_nocow_pages_worker(struct btrfs_work *work); | 
| Wang Shilong | cb7ab02 | 2013-12-04 21:16:53 +0800 | [diff] [blame] | 302 | static void __scrub_blocked_if_needed(struct btrfs_fs_info *fs_info); | 
| Wang Shilong | 3cb0929 | 2013-12-04 21:15:19 +0800 | [diff] [blame] | 303 | static void scrub_blocked_if_needed(struct btrfs_fs_info *fs_info); | 
| Filipe Manana | f55985f | 2015-02-09 21:14:24 +0000 | [diff] [blame] | 304 | static void scrub_put_ctx(struct scrub_ctx *sctx); | 
| Stefan Behrens | 1623ede | 2012-03-27 14:21:26 -0400 | [diff] [blame] | 305 |  | 
 | 306 |  | 
| Stefan Behrens | b6bfebc | 2012-11-02 16:44:58 +0100 | [diff] [blame] | 307 | static void scrub_pending_bio_inc(struct scrub_ctx *sctx) | 
 | 308 | { | 
| Elena Reshetova | 99f4cdb | 2017-03-03 10:55:25 +0200 | [diff] [blame] | 309 | 	refcount_inc(&sctx->refs); | 
| Stefan Behrens | b6bfebc | 2012-11-02 16:44:58 +0100 | [diff] [blame] | 310 | 	atomic_inc(&sctx->bios_in_flight); | 
 | 311 | } | 
 | 312 |  | 
 | 313 | static void scrub_pending_bio_dec(struct scrub_ctx *sctx) | 
 | 314 | { | 
 | 315 | 	atomic_dec(&sctx->bios_in_flight); | 
 | 316 | 	wake_up(&sctx->list_wait); | 
| Filipe Manana | f55985f | 2015-02-09 21:14:24 +0000 | [diff] [blame] | 317 | 	scrub_put_ctx(sctx); | 
| Stefan Behrens | b6bfebc | 2012-11-02 16:44:58 +0100 | [diff] [blame] | 318 | } | 
 | 319 |  | 
| Wang Shilong | cb7ab02 | 2013-12-04 21:16:53 +0800 | [diff] [blame] | 320 | static void __scrub_blocked_if_needed(struct btrfs_fs_info *fs_info) | 
| Wang Shilong | 3cb0929 | 2013-12-04 21:15:19 +0800 | [diff] [blame] | 321 | { | 
 | 322 | 	while (atomic_read(&fs_info->scrub_pause_req)) { | 
 | 323 | 		mutex_unlock(&fs_info->scrub_lock); | 
 | 324 | 		wait_event(fs_info->scrub_pause_wait, | 
 | 325 | 		   atomic_read(&fs_info->scrub_pause_req) == 0); | 
 | 326 | 		mutex_lock(&fs_info->scrub_lock); | 
 | 327 | 	} | 
 | 328 | } | 
 | 329 |  | 
| Zhaolei | 0e22be8 | 2015-08-05 16:43:28 +0800 | [diff] [blame] | 330 | static void scrub_pause_on(struct btrfs_fs_info *fs_info) | 
| Wang Shilong | cb7ab02 | 2013-12-04 21:16:53 +0800 | [diff] [blame] | 331 | { | 
 | 332 | 	atomic_inc(&fs_info->scrubs_paused); | 
 | 333 | 	wake_up(&fs_info->scrub_pause_wait); | 
| Zhaolei | 0e22be8 | 2015-08-05 16:43:28 +0800 | [diff] [blame] | 334 | } | 
| Wang Shilong | cb7ab02 | 2013-12-04 21:16:53 +0800 | [diff] [blame] | 335 |  | 
| Zhaolei | 0e22be8 | 2015-08-05 16:43:28 +0800 | [diff] [blame] | 336 | static void scrub_pause_off(struct btrfs_fs_info *fs_info) | 
 | 337 | { | 
| Wang Shilong | cb7ab02 | 2013-12-04 21:16:53 +0800 | [diff] [blame] | 338 | 	mutex_lock(&fs_info->scrub_lock); | 
 | 339 | 	__scrub_blocked_if_needed(fs_info); | 
 | 340 | 	atomic_dec(&fs_info->scrubs_paused); | 
 | 341 | 	mutex_unlock(&fs_info->scrub_lock); | 
 | 342 |  | 
 | 343 | 	wake_up(&fs_info->scrub_pause_wait); | 
 | 344 | } | 
 | 345 |  | 
| Zhaolei | 0e22be8 | 2015-08-05 16:43:28 +0800 | [diff] [blame] | 346 | static void scrub_blocked_if_needed(struct btrfs_fs_info *fs_info) | 
 | 347 | { | 
 | 348 | 	scrub_pause_on(fs_info); | 
 | 349 | 	scrub_pause_off(fs_info); | 
 | 350 | } | 
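/*
 * Illustrative only: callers that have to yield to a pause request around a
 * longer operation are expected to bracket it with the pair above, roughly:
 *
 *	scrub_pause_on(fs_info);
 *	... wait for in-flight bios, flush pending writes ...
 *	scrub_pause_off(fs_info);
 *
 * scrub_blocked_if_needed() is the degenerate case with nothing in between.
 */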
 | 351 |  | 
| Stefan Behrens | b6bfebc | 2012-11-02 16:44:58 +0100 | [diff] [blame] | 352 | /* | 
| Qu Wenruo | 0966a7b | 2017-04-14 08:35:54 +0800 | [diff] [blame] | 353 |  * Insert a new full stripe lock into the full stripe locks tree | 
 | 354 |  * | 
 | 355 |  * Return pointer to existing or newly inserted full_stripe_lock structure if | 
 | 356 |  * everything works well. | 
 | 357 |  * Return ERR_PTR(-ENOMEM) if we failed to allocate memory | 
 | 358 |  * | 
 | 359 |  * NOTE: caller must hold full_stripe_locks_root->lock before calling this | 
 | 360 |  * function | 
 | 361 |  */ | 
 | 362 | static struct full_stripe_lock *insert_full_stripe_lock( | 
 | 363 | 		struct btrfs_full_stripe_locks_tree *locks_root, | 
 | 364 | 		u64 fstripe_logical) | 
 | 365 | { | 
 | 366 | 	struct rb_node **p; | 
 | 367 | 	struct rb_node *parent = NULL; | 
 | 368 | 	struct full_stripe_lock *entry; | 
 | 369 | 	struct full_stripe_lock *ret; | 
 | 370 |  | 
 | 371 | 	WARN_ON(!mutex_is_locked(&locks_root->lock)); | 
 | 372 |  | 
 | 373 | 	p = &locks_root->root.rb_node; | 
 | 374 | 	while (*p) { | 
 | 375 | 		parent = *p; | 
 | 376 | 		entry = rb_entry(parent, struct full_stripe_lock, node); | 
 | 377 | 		if (fstripe_logical < entry->logical) { | 
 | 378 | 			p = &(*p)->rb_left; | 
 | 379 | 		} else if (fstripe_logical > entry->logical) { | 
 | 380 | 			p = &(*p)->rb_right; | 
 | 381 | 		} else { | 
 | 382 | 			entry->refs++; | 
 | 383 | 			return entry; | 
 | 384 | 		} | 
 | 385 | 	} | 
 | 386 |  | 
 | 387 | 	/* Insert new lock */ | 
 | 388 | 	ret = kmalloc(sizeof(*ret), GFP_KERNEL); | 
 | 389 | 	if (!ret) | 
 | 390 | 		return ERR_PTR(-ENOMEM); | 
 | 391 | 	ret->logical = fstripe_logical; | 
 | 392 | 	ret->refs = 1; | 
 | 393 | 	mutex_init(&ret->mutex); | 
 | 394 |  | 
 | 395 | 	rb_link_node(&ret->node, parent, p); | 
 | 396 | 	rb_insert_color(&ret->node, &locks_root->root); | 
 | 397 | 	return ret; | 
 | 398 | } | 
 | 399 |  | 
 | 400 | /* | 
 | 401 |  * Search for a full stripe lock of a block group | 
 | 402 |  * | 
 | 403 |  * Return pointer to existing full stripe lock if found | 
 | 404 |  * Return NULL if not found | 
 | 405 |  */ | 
 | 406 | static struct full_stripe_lock *search_full_stripe_lock( | 
 | 407 | 		struct btrfs_full_stripe_locks_tree *locks_root, | 
 | 408 | 		u64 fstripe_logical) | 
 | 409 | { | 
 | 410 | 	struct rb_node *node; | 
 | 411 | 	struct full_stripe_lock *entry; | 
 | 412 |  | 
 | 413 | 	WARN_ON(!mutex_is_locked(&locks_root->lock)); | 
 | 414 |  | 
 | 415 | 	node = locks_root->root.rb_node; | 
 | 416 | 	while (node) { | 
 | 417 | 		entry = rb_entry(node, struct full_stripe_lock, node); | 
 | 418 | 		if (fstripe_logical < entry->logical) | 
 | 419 | 			node = node->rb_left; | 
 | 420 | 		else if (fstripe_logical > entry->logical) | 
 | 421 | 			node = node->rb_right; | 
 | 422 | 		else | 
 | 423 | 			return entry; | 
 | 424 | 	} | 
 | 425 | 	return NULL; | 
 | 426 | } | 
 | 427 |  | 
 | 428 | /* | 
 | 429 |  * Helper to get full stripe logical from a normal bytenr. | 
 | 430 |  * | 
 | 431 |  * Caller must ensure @cache is a RAID56 block group. | 
 | 432 |  */ | 
 | 433 | static u64 get_full_stripe_logical(struct btrfs_block_group_cache *cache, | 
 | 434 | 				   u64 bytenr) | 
 | 435 | { | 
 | 436 | 	u64 ret; | 
 | 437 |  | 
 | 438 | 	/* | 
 | 439 | 	 * Due to chunk item size limit, full stripe length should not be | 
 | 440 | 	 * larger than U32_MAX. Just a sanity check here. | 
 | 441 | 	 */ | 
 | 442 | 	WARN_ON_ONCE(cache->full_stripe_len >= U32_MAX); | 
 | 443 |  | 
 | 444 | 	/* | 
 | 445 | 	 * round_down() can only handle powers of 2, while a RAID56 full | 
 | 446 | 	 * stripe length can be 64KiB * n, so we need to manually round down. | 
 | 447 | 	 */ | 
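	/*
	 * Worked example with made-up numbers: for a block group starting at
	 * key.objectid == 1G whose full_stripe_len == 192KiB (three 64KiB data
	 * stripes), a bytenr 500KiB into the block group gives
	 * div64_u64(500KiB, 192KiB) == 2, so the full stripe logical start is
	 * 2 * 192KiB == 384KiB into the block group.
	 */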
 | 448 | 	ret = div64_u64(bytenr - cache->key.objectid, cache->full_stripe_len) * | 
 | 449 | 		cache->full_stripe_len + cache->key.objectid; | 
 | 450 | 	return ret; | 
 | 451 | } | 
 | 452 |  | 
 | 453 | /* | 
 | 454 |  * Lock a full stripe to avoid concurrency between recovery and read | 
 | 455 |  * | 
 | 456 |  * It's only used for profiles with parities (RAID5/6); for other profiles it | 
 | 457 |  * does nothing. | 
 | 458 |  * | 
 | 459 |  * Return 0 if we locked the full stripe covering @bytenr, with a mutex held. | 
 | 460 |  * The caller must then call unlock_full_stripe() in the same context. | 
 | 461 |  * | 
 | 462 |  * Return <0 if an error is encountered. | 
 | 463 |  */ | 
 | 464 | static int lock_full_stripe(struct btrfs_fs_info *fs_info, u64 bytenr, | 
 | 465 | 			    bool *locked_ret) | 
 | 466 | { | 
 | 467 | 	struct btrfs_block_group_cache *bg_cache; | 
 | 468 | 	struct btrfs_full_stripe_locks_tree *locks_root; | 
 | 469 | 	struct full_stripe_lock *existing; | 
 | 470 | 	u64 fstripe_start; | 
 | 471 | 	int ret = 0; | 
 | 472 |  | 
 | 473 | 	*locked_ret = false; | 
 | 474 | 	bg_cache = btrfs_lookup_block_group(fs_info, bytenr); | 
 | 475 | 	if (!bg_cache) { | 
 | 476 | 		ASSERT(0); | 
 | 477 | 		return -ENOENT; | 
 | 478 | 	} | 
 | 479 |  | 
 | 480 | 	/* Profiles not based on parity don't need full stripe lock */ | 
 | 481 | 	if (!(bg_cache->flags & BTRFS_BLOCK_GROUP_RAID56_MASK)) | 
 | 482 | 		goto out; | 
 | 483 | 	locks_root = &bg_cache->full_stripe_locks_root; | 
 | 484 |  | 
 | 485 | 	fstripe_start = get_full_stripe_logical(bg_cache, bytenr); | 
 | 486 |  | 
 | 487 | 	/* Now insert the full stripe lock */ | 
 | 488 | 	mutex_lock(&locks_root->lock); | 
 | 489 | 	existing = insert_full_stripe_lock(locks_root, fstripe_start); | 
 | 490 | 	mutex_unlock(&locks_root->lock); | 
 | 491 | 	if (IS_ERR(existing)) { | 
 | 492 | 		ret = PTR_ERR(existing); | 
 | 493 | 		goto out; | 
 | 494 | 	} | 
 | 495 | 	mutex_lock(&existing->mutex); | 
 | 496 | 	*locked_ret = true; | 
 | 497 | out: | 
 | 498 | 	btrfs_put_block_group(bg_cache); | 
 | 499 | 	return ret; | 
 | 500 | } | 
 | 501 |  | 
 | 502 | /* | 
 | 503 |  * Unlock a full stripe. | 
 | 504 |  * | 
 | 505 |  * NOTE: The caller must ensure this is called in the same context as the | 
 | 506 |  * corresponding lock_full_stripe(). | 
 | 507 |  * | 
 | 508 |  * Return 0 if we unlock the full stripe without problem. | 
 | 509 |  * Return <0 for error | 
 | 510 |  */ | 
 | 511 | static int unlock_full_stripe(struct btrfs_fs_info *fs_info, u64 bytenr, | 
 | 512 | 			      bool locked) | 
 | 513 | { | 
 | 514 | 	struct btrfs_block_group_cache *bg_cache; | 
 | 515 | 	struct btrfs_full_stripe_locks_tree *locks_root; | 
 | 516 | 	struct full_stripe_lock *fstripe_lock; | 
 | 517 | 	u64 fstripe_start; | 
 | 518 | 	bool freeit = false; | 
 | 519 | 	int ret = 0; | 
 | 520 |  | 
 | 521 | 	/* If we didn't acquire full stripe lock, no need to continue */ | 
 | 522 | 	if (!locked) | 
 | 523 | 		return 0; | 
 | 524 |  | 
 | 525 | 	bg_cache = btrfs_lookup_block_group(fs_info, bytenr); | 
 | 526 | 	if (!bg_cache) { | 
 | 527 | 		ASSERT(0); | 
 | 528 | 		return -ENOENT; | 
 | 529 | 	} | 
 | 530 | 	if (!(bg_cache->flags & BTRFS_BLOCK_GROUP_RAID56_MASK)) | 
 | 531 | 		goto out; | 
 | 532 |  | 
 | 533 | 	locks_root = &bg_cache->full_stripe_locks_root; | 
 | 534 | 	fstripe_start = get_full_stripe_logical(bg_cache, bytenr); | 
 | 535 |  | 
 | 536 | 	mutex_lock(&locks_root->lock); | 
 | 537 | 	fstripe_lock = search_full_stripe_lock(locks_root, fstripe_start); | 
 | 538 | 	/* Unpaired unlock_full_stripe() detected */ | 
 | 539 | 	if (!fstripe_lock) { | 
 | 540 | 		WARN_ON(1); | 
 | 541 | 		ret = -ENOENT; | 
 | 542 | 		mutex_unlock(&locks_root->lock); | 
 | 543 | 		goto out; | 
 | 544 | 	} | 
 | 545 |  | 
 | 546 | 	if (fstripe_lock->refs == 0) { | 
 | 547 | 		WARN_ON(1); | 
 | 548 | 		btrfs_warn(fs_info, "full stripe lock at %llu refcount underflow", | 
 | 549 | 			fstripe_lock->logical); | 
 | 550 | 	} else { | 
 | 551 | 		fstripe_lock->refs--; | 
 | 552 | 	} | 
 | 553 |  | 
 | 554 | 	if (fstripe_lock->refs == 0) { | 
 | 555 | 		rb_erase(&fstripe_lock->node, &locks_root->root); | 
 | 556 | 		freeit = true; | 
 | 557 | 	} | 
 | 558 | 	mutex_unlock(&locks_root->lock); | 
 | 559 |  | 
 | 560 | 	mutex_unlock(&fstripe_lock->mutex); | 
 | 561 | 	if (freeit) | 
 | 562 | 		kfree(fstripe_lock); | 
 | 563 | out: | 
 | 564 | 	btrfs_put_block_group(bg_cache); | 
 | 565 | 	return ret; | 
 | 566 | } | 
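/*
 * A minimal usage sketch of the two helpers above (illustrative, not a
 * definitive caller): the same locked flag must be passed back on unlock:
 *
 *	bool locked;
 *	int ret;
 *
 *	ret = lock_full_stripe(fs_info, bytenr, &locked);
 *	if (ret < 0)
 *		return ret;
 *	... recheck/repair the range ...
 *	unlock_full_stripe(fs_info, bytenr, locked);
 */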
 | 567 |  | 
 | 568 | /* | 
| Stefan Behrens | b6bfebc | 2012-11-02 16:44:58 +0100 | [diff] [blame] | 569 |  * used for workers that require transaction commits (i.e., for the | 
 | 570 |  * NOCOW case) | 
 | 571 |  */ | 
 | 572 | static void scrub_pending_trans_workers_inc(struct scrub_ctx *sctx) | 
 | 573 | { | 
| Jeff Mahoney | fb45625 | 2016-06-22 18:54:56 -0400 | [diff] [blame] | 574 | 	struct btrfs_fs_info *fs_info = sctx->fs_info; | 
| Stefan Behrens | b6bfebc | 2012-11-02 16:44:58 +0100 | [diff] [blame] | 575 |  | 
| Elena Reshetova | 99f4cdb | 2017-03-03 10:55:25 +0200 | [diff] [blame] | 576 | 	refcount_inc(&sctx->refs); | 
| Stefan Behrens | b6bfebc | 2012-11-02 16:44:58 +0100 | [diff] [blame] | 577 | 	/* | 
 | 578 | 	 * Increment scrubs_running to prevent cancel requests from | 
 | 579 | 	 * completing as long as a worker is running. We must also | 
 | 580 | 	 * increment scrubs_paused to prevent deadlocking on pause | 
 | 581 | 	 * requests used for transaction commits (as the worker uses a | 
 | 582 | 	 * transaction context). It is safe to regard the worker | 
 | 583 | 	 * as paused for all practical matters. Effectively, we only | 
 | 584 | 	 * prevent cancellation requests from completing. | 
 | 585 | 	 */ | 
 | 586 | 	mutex_lock(&fs_info->scrub_lock); | 
 | 587 | 	atomic_inc(&fs_info->scrubs_running); | 
 | 588 | 	atomic_inc(&fs_info->scrubs_paused); | 
 | 589 | 	mutex_unlock(&fs_info->scrub_lock); | 
| Wang Shilong | 32a4478 | 2014-02-19 19:24:19 +0800 | [diff] [blame] | 590 |  | 
 | 591 | 	/* | 
 | 592 | 	 * Checking the @scrubs_running == @scrubs_paused condition | 
 | 593 | 	 * inside wait_event() is not an atomic operation, | 
 | 594 | 	 * which means we may inc/dec @scrubs_running/@scrubs_paused | 
 | 595 | 	 * at any time. Let's wake up @scrub_pause_wait as | 
 | 596 | 	 * much as we can so that a transaction commit is blocked as briefly as possible. | 
 | 597 | 	 */ | 
 | 598 | 	wake_up(&fs_info->scrub_pause_wait); | 
 | 599 |  | 
| Stefan Behrens | b6bfebc | 2012-11-02 16:44:58 +0100 | [diff] [blame] | 600 | 	atomic_inc(&sctx->workers_pending); | 
 | 601 | } | 
 | 602 |  | 
 | 603 | /* used for workers that require transaction commits */ | 
 | 604 | static void scrub_pending_trans_workers_dec(struct scrub_ctx *sctx) | 
 | 605 | { | 
| Jeff Mahoney | fb45625 | 2016-06-22 18:54:56 -0400 | [diff] [blame] | 606 | 	struct btrfs_fs_info *fs_info = sctx->fs_info; | 
| Stefan Behrens | b6bfebc | 2012-11-02 16:44:58 +0100 | [diff] [blame] | 607 |  | 
 | 608 | 	/* | 
 | 609 | 	 * see scrub_pending_trans_workers_inc() for why we're pretending | 
 | 610 | 	 * to be paused in the scrub counters | 
 | 611 | 	 */ | 
 | 612 | 	mutex_lock(&fs_info->scrub_lock); | 
 | 613 | 	atomic_dec(&fs_info->scrubs_running); | 
 | 614 | 	atomic_dec(&fs_info->scrubs_paused); | 
 | 615 | 	mutex_unlock(&fs_info->scrub_lock); | 
 | 616 | 	atomic_dec(&sctx->workers_pending); | 
 | 617 | 	wake_up(&fs_info->scrub_pause_wait); | 
 | 618 | 	wake_up(&sctx->list_wait); | 
| Filipe Manana | f55985f | 2015-02-09 21:14:24 +0000 | [diff] [blame] | 619 | 	scrub_put_ctx(sctx); | 
| Stefan Behrens | b6bfebc | 2012-11-02 16:44:58 +0100 | [diff] [blame] | 620 | } | 
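/*
 * The two helpers above are meant to be used as a pair: the submitter calls
 * scrub_pending_trans_workers_inc() before queuing work that may join a
 * transaction, and that worker calls scrub_pending_trans_workers_dec() when
 * it finishes (the callers live later in this file, e.g. the NOCOW copy
 * path hinted at by the comment above).
 */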
 | 621 |  | 
| Stefan Behrens | d9d181c | 2012-11-02 09:58:09 +0100 | [diff] [blame] | 622 | static void scrub_free_csums(struct scrub_ctx *sctx) | 
| Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 623 | { | 
| Stefan Behrens | d9d181c | 2012-11-02 09:58:09 +0100 | [diff] [blame] | 624 | 	while (!list_empty(&sctx->csum_list)) { | 
| Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 625 | 		struct btrfs_ordered_sum *sum; | 
| Stefan Behrens | d9d181c | 2012-11-02 09:58:09 +0100 | [diff] [blame] | 626 | 		sum = list_first_entry(&sctx->csum_list, | 
| Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 627 | 				       struct btrfs_ordered_sum, list); | 
 | 628 | 		list_del(&sum->list); | 
 | 629 | 		kfree(sum); | 
 | 630 | 	} | 
 | 631 | } | 
 | 632 |  | 
| Stefan Behrens | d9d181c | 2012-11-02 09:58:09 +0100 | [diff] [blame] | 633 | static noinline_for_stack void scrub_free_ctx(struct scrub_ctx *sctx) | 
| Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 634 | { | 
 | 635 | 	int i; | 
| Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 636 |  | 
| Stefan Behrens | d9d181c | 2012-11-02 09:58:09 +0100 | [diff] [blame] | 637 | 	if (!sctx) | 
| Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 638 | 		return; | 
 | 639 |  | 
| Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 640 | 	/* this can happen when scrub is cancelled */ | 
| Stefan Behrens | d9d181c | 2012-11-02 09:58:09 +0100 | [diff] [blame] | 641 | 	if (sctx->curr != -1) { | 
 | 642 | 		struct scrub_bio *sbio = sctx->bios[sctx->curr]; | 
| Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 643 |  | 
 | 644 | 		for (i = 0; i < sbio->page_count; i++) { | 
| Stefan Behrens | ff023aa | 2012-11-06 11:43:11 +0100 | [diff] [blame] | 645 | 			WARN_ON(!sbio->pagev[i]->page); | 
| Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 646 | 			scrub_block_put(sbio->pagev[i]->sblock); | 
 | 647 | 		} | 
 | 648 | 		bio_put(sbio->bio); | 
 | 649 | 	} | 
 | 650 |  | 
| Stefan Behrens | ff023aa | 2012-11-06 11:43:11 +0100 | [diff] [blame] | 651 | 	for (i = 0; i < SCRUB_BIOS_PER_SCTX; ++i) { | 
| Stefan Behrens | d9d181c | 2012-11-02 09:58:09 +0100 | [diff] [blame] | 652 | 		struct scrub_bio *sbio = sctx->bios[i]; | 
| Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 653 |  | 
 | 654 | 		if (!sbio) | 
 | 655 | 			break; | 
| Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 656 | 		kfree(sbio); | 
 | 657 | 	} | 
 | 658 |  | 
| David Sterba | 4e2814e | 2017-05-16 19:10:29 +0200 | [diff] [blame] | 659 | 	kfree(sctx->wr_ctx.wr_curr_bio); | 
| Stefan Behrens | d9d181c | 2012-11-02 09:58:09 +0100 | [diff] [blame] | 660 | 	scrub_free_csums(sctx); | 
 | 661 | 	kfree(sctx); | 
| Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 662 | } | 
 | 663 |  | 
| Filipe Manana | f55985f | 2015-02-09 21:14:24 +0000 | [diff] [blame] | 664 | static void scrub_put_ctx(struct scrub_ctx *sctx) | 
 | 665 | { | 
| Elena Reshetova | 99f4cdb | 2017-03-03 10:55:25 +0200 | [diff] [blame] | 666 | 	if (refcount_dec_and_test(&sctx->refs)) | 
| Filipe Manana | f55985f | 2015-02-09 21:14:24 +0000 | [diff] [blame] | 667 | 		scrub_free_ctx(sctx); | 
 | 668 | } | 
 | 669 |  | 
| Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 670 | static noinline_for_stack | 
| Stefan Behrens | 63a212a | 2012-11-05 18:29:28 +0100 | [diff] [blame] | 671 | struct scrub_ctx *scrub_setup_ctx(struct btrfs_device *dev, int is_dev_replace) | 
| Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 672 | { | 
| Stefan Behrens | d9d181c | 2012-11-02 09:58:09 +0100 | [diff] [blame] | 673 | 	struct scrub_ctx *sctx; | 
| Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 674 | 	int		i; | 
| Jeff Mahoney | fb45625 | 2016-06-22 18:54:56 -0400 | [diff] [blame] | 675 | 	struct btrfs_fs_info *fs_info = dev->fs_info; | 
| Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 676 |  | 
| David Sterba | 58c4e17 | 2016-02-11 10:49:42 +0100 | [diff] [blame] | 677 | 	sctx = kzalloc(sizeof(*sctx), GFP_KERNEL); | 
| Stefan Behrens | d9d181c | 2012-11-02 09:58:09 +0100 | [diff] [blame] | 678 | 	if (!sctx) | 
| Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 679 | 		goto nomem; | 
| Elena Reshetova | 99f4cdb | 2017-03-03 10:55:25 +0200 | [diff] [blame] | 680 | 	refcount_set(&sctx->refs, 1); | 
| Stefan Behrens | 63a212a | 2012-11-05 18:29:28 +0100 | [diff] [blame] | 681 | 	sctx->is_dev_replace = is_dev_replace; | 
| Kent Overstreet | b54ffb7 | 2015-05-19 14:31:01 +0200 | [diff] [blame] | 682 | 	sctx->pages_per_rd_bio = SCRUB_PAGES_PER_RD_BIO; | 
| Stefan Behrens | d9d181c | 2012-11-02 09:58:09 +0100 | [diff] [blame] | 683 | 	sctx->curr = -1; | 
| Jeff Mahoney | fb45625 | 2016-06-22 18:54:56 -0400 | [diff] [blame] | 684 | 	sctx->fs_info = dev->fs_info; | 
| Stefan Behrens | ff023aa | 2012-11-06 11:43:11 +0100 | [diff] [blame] | 685 | 	for (i = 0; i < SCRUB_BIOS_PER_SCTX; ++i) { | 
| Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 686 | 		struct scrub_bio *sbio; | 
 | 687 |  | 
| David Sterba | 58c4e17 | 2016-02-11 10:49:42 +0100 | [diff] [blame] | 688 | 		sbio = kzalloc(sizeof(*sbio), GFP_KERNEL); | 
| Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 689 | 		if (!sbio) | 
 | 690 | 			goto nomem; | 
| Stefan Behrens | d9d181c | 2012-11-02 09:58:09 +0100 | [diff] [blame] | 691 | 		sctx->bios[i] = sbio; | 
| Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 692 |  | 
| Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 693 | 		sbio->index = i; | 
| Stefan Behrens | d9d181c | 2012-11-02 09:58:09 +0100 | [diff] [blame] | 694 | 		sbio->sctx = sctx; | 
| Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 695 | 		sbio->page_count = 0; | 
| Liu Bo | 9e0af23 | 2014-08-15 23:36:53 +0800 | [diff] [blame] | 696 | 		btrfs_init_work(&sbio->work, btrfs_scrub_helper, | 
 | 697 | 				scrub_bio_end_io_worker, NULL, NULL); | 
| Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 698 |  | 
| Stefan Behrens | ff023aa | 2012-11-06 11:43:11 +0100 | [diff] [blame] | 699 | 		if (i != SCRUB_BIOS_PER_SCTX - 1) | 
| Stefan Behrens | d9d181c | 2012-11-02 09:58:09 +0100 | [diff] [blame] | 700 | 			sctx->bios[i]->next_free = i + 1; | 
| Jan Schmidt | 0ef8e45 | 2011-06-13 20:04:15 +0200 | [diff] [blame] | 701 | 		else | 
| Stefan Behrens | d9d181c | 2012-11-02 09:58:09 +0100 | [diff] [blame] | 702 | 			sctx->bios[i]->next_free = -1; | 
| Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 703 | 	} | 
| Stefan Behrens | d9d181c | 2012-11-02 09:58:09 +0100 | [diff] [blame] | 704 | 	sctx->first_free = 0; | 
| Stefan Behrens | b6bfebc | 2012-11-02 16:44:58 +0100 | [diff] [blame] | 705 | 	atomic_set(&sctx->bios_in_flight, 0); | 
 | 706 | 	atomic_set(&sctx->workers_pending, 0); | 
| Stefan Behrens | d9d181c | 2012-11-02 09:58:09 +0100 | [diff] [blame] | 707 | 	atomic_set(&sctx->cancel_req, 0); | 
 | 708 | 	sctx->csum_size = btrfs_super_csum_size(fs_info->super_copy); | 
 | 709 | 	INIT_LIST_HEAD(&sctx->csum_list); | 
| Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 710 |  | 
| Stefan Behrens | d9d181c | 2012-11-02 09:58:09 +0100 | [diff] [blame] | 711 | 	spin_lock_init(&sctx->list_lock); | 
 | 712 | 	spin_lock_init(&sctx->stat_lock); | 
 | 713 | 	init_waitqueue_head(&sctx->list_wait); | 
| Stefan Behrens | ff023aa | 2012-11-06 11:43:11 +0100 | [diff] [blame] | 714 |  | 
| David Sterba | 8fcdac3 | 2017-05-16 19:10:23 +0200 | [diff] [blame] | 715 | 	WARN_ON(sctx->wr_ctx.wr_curr_bio != NULL); | 
 | 716 | 	mutex_init(&sctx->wr_ctx.wr_lock); | 
 | 717 | 	sctx->wr_ctx.wr_curr_bio = NULL; | 
 | 718 | 	if (is_dev_replace) { | 
 | 719 | 		WARN_ON(!dev->bdev); | 
 | 720 | 		sctx->wr_ctx.pages_per_wr_bio = SCRUB_PAGES_PER_WR_BIO; | 
 | 721 | 		sctx->wr_ctx.tgtdev = dev; | 
 | 722 | 		atomic_set(&sctx->wr_ctx.flush_all_writes, 0); | 
| Stefan Behrens | ff023aa | 2012-11-06 11:43:11 +0100 | [diff] [blame] | 723 | 	} | 
| David Sterba | 8fcdac3 | 2017-05-16 19:10:23 +0200 | [diff] [blame] | 724 |  | 
| Stefan Behrens | d9d181c | 2012-11-02 09:58:09 +0100 | [diff] [blame] | 725 | 	return sctx; | 
| Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 726 |  | 
 | 727 | nomem: | 
| Stefan Behrens | d9d181c | 2012-11-02 09:58:09 +0100 | [diff] [blame] | 728 | 	scrub_free_ctx(sctx); | 
| Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 729 | 	return ERR_PTR(-ENOMEM); | 
 | 730 | } | 
 | 731 |  | 
| Stefan Behrens | ff023aa | 2012-11-06 11:43:11 +0100 | [diff] [blame] | 732 | static int scrub_print_warning_inode(u64 inum, u64 offset, u64 root, | 
 | 733 | 				     void *warn_ctx) | 
| Jan Schmidt | 558540c | 2011-06-13 19:59:12 +0200 | [diff] [blame] | 734 | { | 
 | 735 | 	u64 isize; | 
 | 736 | 	u32 nlink; | 
 | 737 | 	int ret; | 
 | 738 | 	int i; | 
 | 739 | 	struct extent_buffer *eb; | 
 | 740 | 	struct btrfs_inode_item *inode_item; | 
| Stefan Behrens | ff023aa | 2012-11-06 11:43:11 +0100 | [diff] [blame] | 741 | 	struct scrub_warning *swarn = warn_ctx; | 
| Jeff Mahoney | fb45625 | 2016-06-22 18:54:56 -0400 | [diff] [blame] | 742 | 	struct btrfs_fs_info *fs_info = swarn->dev->fs_info; | 
| Jan Schmidt | 558540c | 2011-06-13 19:59:12 +0200 | [diff] [blame] | 743 | 	struct inode_fs_paths *ipath = NULL; | 
 | 744 | 	struct btrfs_root *local_root; | 
 | 745 | 	struct btrfs_key root_key; | 
| David Sterba | 1d4c08e | 2015-01-02 19:36:14 +0100 | [diff] [blame] | 746 | 	struct btrfs_key key; | 
| Jan Schmidt | 558540c | 2011-06-13 19:59:12 +0200 | [diff] [blame] | 747 |  | 
 | 748 | 	root_key.objectid = root; | 
 | 749 | 	root_key.type = BTRFS_ROOT_ITEM_KEY; | 
 | 750 | 	root_key.offset = (u64)-1; | 
 | 751 | 	local_root = btrfs_read_fs_root_no_name(fs_info, &root_key); | 
 | 752 | 	if (IS_ERR(local_root)) { | 
 | 753 | 		ret = PTR_ERR(local_root); | 
 | 754 | 		goto err; | 
 | 755 | 	} | 
 | 756 |  | 
| David Sterba | 14692cc | 2015-01-02 18:55:46 +0100 | [diff] [blame] | 757 | 	/* | 
 | 758 | 	 * this makes the path point to (inum INODE_ITEM ioff) | 
 | 759 | 	 */ | 
| David Sterba | 1d4c08e | 2015-01-02 19:36:14 +0100 | [diff] [blame] | 760 | 	key.objectid = inum; | 
 | 761 | 	key.type = BTRFS_INODE_ITEM_KEY; | 
 | 762 | 	key.offset = 0; | 
 | 763 |  | 
 | 764 | 	ret = btrfs_search_slot(NULL, local_root, &key, swarn->path, 0, 0); | 
| Jan Schmidt | 558540c | 2011-06-13 19:59:12 +0200 | [diff] [blame] | 765 | 	if (ret) { | 
 | 766 | 		btrfs_release_path(swarn->path); | 
 | 767 | 		goto err; | 
 | 768 | 	} | 
 | 769 |  | 
 | 770 | 	eb = swarn->path->nodes[0]; | 
 | 771 | 	inode_item = btrfs_item_ptr(eb, swarn->path->slots[0], | 
 | 772 | 					struct btrfs_inode_item); | 
 | 773 | 	isize = btrfs_inode_size(eb, inode_item); | 
 | 774 | 	nlink = btrfs_inode_nlink(eb, inode_item); | 
 | 775 | 	btrfs_release_path(swarn->path); | 
 | 776 |  | 
 | 777 | 	ipath = init_ipath(4096, local_root, swarn->path); | 
| Dan Carpenter | 26bdef5 | 2011-11-16 11:28:01 +0300 | [diff] [blame] | 778 | 	if (IS_ERR(ipath)) { | 
 | 779 | 		ret = PTR_ERR(ipath); | 
 | 780 | 		ipath = NULL; | 
 | 781 | 		goto err; | 
 | 782 | 	} | 
| Jan Schmidt | 558540c | 2011-06-13 19:59:12 +0200 | [diff] [blame] | 783 | 	ret = paths_from_inode(inum, ipath); | 
 | 784 |  | 
 | 785 | 	if (ret < 0) | 
 | 786 | 		goto err; | 
 | 787 |  | 
 | 788 | 	/* | 
 | 789 | 	 * we deliberately ignore the fact that ipath might have been too small | 
 | 790 | 	 * to hold all of the paths here | 
 | 791 | 	 */ | 
 | 792 | 	for (i = 0; i < ipath->fspath->elem_cnt; ++i) | 
| Jeff Mahoney | 5d163e0 | 2016-09-20 10:05:00 -0400 | [diff] [blame] | 793 | 		btrfs_warn_in_rcu(fs_info, | 
 | 794 | 				  "%s at logical %llu on dev %s, sector %llu, root %llu, inode %llu, offset %llu, length %llu, links %u (path: %s)", | 
 | 795 | 				  swarn->errstr, swarn->logical, | 
 | 796 | 				  rcu_str_deref(swarn->dev->name), | 
 | 797 | 				  (unsigned long long)swarn->sector, | 
 | 798 | 				  root, inum, offset, | 
 | 799 | 				  min(isize - offset, (u64)PAGE_SIZE), nlink, | 
 | 800 | 				  (char *)(unsigned long)ipath->fspath->val[i]); | 
| Jan Schmidt | 558540c | 2011-06-13 19:59:12 +0200 | [diff] [blame] | 801 |  | 
 | 802 | 	free_ipath(ipath); | 
 | 803 | 	return 0; | 
 | 804 |  | 
 | 805 | err: | 
| Jeff Mahoney | 5d163e0 | 2016-09-20 10:05:00 -0400 | [diff] [blame] | 806 | 	btrfs_warn_in_rcu(fs_info, | 
 | 807 | 			  "%s at logical %llu on dev %s, sector %llu, root %llu, inode %llu, offset %llu: path resolving failed with ret=%d", | 
 | 808 | 			  swarn->errstr, swarn->logical, | 
 | 809 | 			  rcu_str_deref(swarn->dev->name), | 
 | 810 | 			  (unsigned long long)swarn->sector, | 
 | 811 | 			  root, inum, offset, ret); | 
| Jan Schmidt | 558540c | 2011-06-13 19:59:12 +0200 | [diff] [blame] | 812 |  | 
 | 813 | 	free_ipath(ipath); | 
 | 814 | 	return 0; | 
 | 815 | } | 
 | 816 |  | 
| Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 817 | static void scrub_print_warning(const char *errstr, struct scrub_block *sblock) | 
| Jan Schmidt | 558540c | 2011-06-13 19:59:12 +0200 | [diff] [blame] | 818 | { | 
| Stefan Behrens | a36cf8b | 2012-11-02 13:26:57 +0100 | [diff] [blame] | 819 | 	struct btrfs_device *dev; | 
 | 820 | 	struct btrfs_fs_info *fs_info; | 
| Jan Schmidt | 558540c | 2011-06-13 19:59:12 +0200 | [diff] [blame] | 821 | 	struct btrfs_path *path; | 
 | 822 | 	struct btrfs_key found_key; | 
 | 823 | 	struct extent_buffer *eb; | 
 | 824 | 	struct btrfs_extent_item *ei; | 
 | 825 | 	struct scrub_warning swarn; | 
| Jan Schmidt | 558540c | 2011-06-13 19:59:12 +0200 | [diff] [blame] | 826 | 	unsigned long ptr = 0; | 
| Jan Schmidt | 4692cf5 | 2011-12-02 14:56:41 +0100 | [diff] [blame] | 827 | 	u64 extent_item_pos; | 
| Liu Bo | 69917e4 | 2012-09-07 20:01:28 -0600 | [diff] [blame] | 828 | 	u64 flags = 0; | 
 | 829 | 	u64 ref_root; | 
 | 830 | 	u32 item_size; | 
| Dan Carpenter | 07c9a8e | 2016-03-11 11:08:56 +0300 | [diff] [blame] | 831 | 	u8 ref_level = 0; | 
| Liu Bo | 69917e4 | 2012-09-07 20:01:28 -0600 | [diff] [blame] | 832 | 	int ret; | 
| Jan Schmidt | 558540c | 2011-06-13 19:59:12 +0200 | [diff] [blame] | 833 |  | 
| Stefan Behrens | a36cf8b | 2012-11-02 13:26:57 +0100 | [diff] [blame] | 834 | 	WARN_ON(sblock->page_count < 1); | 
| Stefan Behrens | 7a9e998 | 2012-11-02 14:58:04 +0100 | [diff] [blame] | 835 | 	dev = sblock->pagev[0]->dev; | 
| Jeff Mahoney | fb45625 | 2016-06-22 18:54:56 -0400 | [diff] [blame] | 836 | 	fs_info = sblock->sctx->fs_info; | 
| Stefan Behrens | a36cf8b | 2012-11-02 13:26:57 +0100 | [diff] [blame] | 837 |  | 
| Jan Schmidt | 558540c | 2011-06-13 19:59:12 +0200 | [diff] [blame] | 838 | 	path = btrfs_alloc_path(); | 
| David Sterba | 8b9456d | 2014-07-30 01:25:30 +0200 | [diff] [blame] | 839 | 	if (!path) | 
 | 840 | 		return; | 
| Jan Schmidt | 558540c | 2011-06-13 19:59:12 +0200 | [diff] [blame] | 841 |  | 
| Stefan Behrens | 7a9e998 | 2012-11-02 14:58:04 +0100 | [diff] [blame] | 842 | 	swarn.sector = (sblock->pagev[0]->physical) >> 9; | 
 | 843 | 	swarn.logical = sblock->pagev[0]->logical; | 
| Jan Schmidt | 558540c | 2011-06-13 19:59:12 +0200 | [diff] [blame] | 844 | 	swarn.errstr = errstr; | 
| Stefan Behrens | a36cf8b | 2012-11-02 13:26:57 +0100 | [diff] [blame] | 845 | 	swarn.dev = NULL; | 
| Jan Schmidt | 558540c | 2011-06-13 19:59:12 +0200 | [diff] [blame] | 846 |  | 
| Liu Bo | 69917e4 | 2012-09-07 20:01:28 -0600 | [diff] [blame] | 847 | 	ret = extent_from_logical(fs_info, swarn.logical, path, &found_key, | 
 | 848 | 				  &flags); | 
| Jan Schmidt | 558540c | 2011-06-13 19:59:12 +0200 | [diff] [blame] | 849 | 	if (ret < 0) | 
 | 850 | 		goto out; | 
 | 851 |  | 
| Jan Schmidt | 4692cf5 | 2011-12-02 14:56:41 +0100 | [diff] [blame] | 852 | 	extent_item_pos = swarn.logical - found_key.objectid; | 
| Jan Schmidt | 558540c | 2011-06-13 19:59:12 +0200 | [diff] [blame] | 853 | 	swarn.extent_item_size = found_key.offset; | 
 | 854 |  | 
 | 855 | 	eb = path->nodes[0]; | 
 | 856 | 	ei = btrfs_item_ptr(eb, path->slots[0], struct btrfs_extent_item); | 
 | 857 | 	item_size = btrfs_item_size_nr(eb, path->slots[0]); | 
 | 858 |  | 
| Liu Bo | 69917e4 | 2012-09-07 20:01:28 -0600 | [diff] [blame] | 859 | 	if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) { | 
| Jan Schmidt | 558540c | 2011-06-13 19:59:12 +0200 | [diff] [blame] | 860 | 		do { | 
| Liu Bo | 6eda71d | 2014-06-09 10:54:07 +0800 | [diff] [blame] | 861 | 			ret = tree_backref_for_extent(&ptr, eb, &found_key, ei, | 
 | 862 | 						      item_size, &ref_root, | 
 | 863 | 						      &ref_level); | 
| David Sterba | ecaeb14 | 2015-10-08 09:01:03 +0200 | [diff] [blame] | 864 | 			btrfs_warn_in_rcu(fs_info, | 
| Jeff Mahoney | 5d163e0 | 2016-09-20 10:05:00 -0400 | [diff] [blame] | 865 | 				"%s at logical %llu on dev %s, sector %llu: metadata %s (level %d) in tree %llu", | 
 | 866 | 				errstr, swarn.logical, | 
| Josef Bacik | 606686e | 2012-06-04 14:03:51 -0400 | [diff] [blame] | 867 | 				rcu_str_deref(dev->name), | 
| Jan Schmidt | 558540c | 2011-06-13 19:59:12 +0200 | [diff] [blame] | 868 | 				(unsigned long long)swarn.sector, | 
 | 869 | 				ref_level ? "node" : "leaf", | 
 | 870 | 				ret < 0 ? -1 : ref_level, | 
 | 871 | 				ret < 0 ? -1 : ref_root); | 
 | 872 | 		} while (ret != 1); | 
| Josef Bacik | d8fe29e | 2013-03-29 08:09:34 -0600 | [diff] [blame] | 873 | 		btrfs_release_path(path); | 
| Jan Schmidt | 558540c | 2011-06-13 19:59:12 +0200 | [diff] [blame] | 874 | 	} else { | 
| Josef Bacik | d8fe29e | 2013-03-29 08:09:34 -0600 | [diff] [blame] | 875 | 		btrfs_release_path(path); | 
| Jan Schmidt | 558540c | 2011-06-13 19:59:12 +0200 | [diff] [blame] | 876 | 		swarn.path = path; | 
| Stefan Behrens | a36cf8b | 2012-11-02 13:26:57 +0100 | [diff] [blame] | 877 | 		swarn.dev = dev; | 
| Jan Schmidt | 7a3ae2f | 2012-03-23 17:32:28 +0100 | [diff] [blame] | 878 | 		iterate_extent_inodes(fs_info, found_key.objectid, | 
 | 879 | 					extent_item_pos, 1, | 
| Jan Schmidt | 558540c | 2011-06-13 19:59:12 +0200 | [diff] [blame] | 880 | 					scrub_print_warning_inode, &swarn); | 
 | 881 | 	} | 
 | 882 |  | 
 | 883 | out: | 
 | 884 | 	btrfs_free_path(path); | 
| Jan Schmidt | 558540c | 2011-06-13 19:59:12 +0200 | [diff] [blame] | 885 | } | 
 | 886 |  | 
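/*
 * Fix up one page of a nodatasum extent. Summarizing the logic below: if the
 * page cache already holds an uptodate, non-dirty copy of the page, the good
 * data is written straight over the bad mirror via repair_io_failure();
 * otherwise the range is marked EXTENT_DAMAGED and the bad mirror is read
 * through the normal readpage path, which performs the repair for us.
 */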
| Stefan Behrens | ff023aa | 2012-11-06 11:43:11 +0100 | [diff] [blame] | 887 | static int scrub_fixup_readpage(u64 inum, u64 offset, u64 root, void *fixup_ctx) | 
| Jan Schmidt | 0ef8e45 | 2011-06-13 20:04:15 +0200 | [diff] [blame] | 888 | { | 
| Jan Schmidt | 5da6fcb | 2011-08-04 18:11:04 +0200 | [diff] [blame] | 889 | 	struct page *page = NULL; | 
| Jan Schmidt | 0ef8e45 | 2011-06-13 20:04:15 +0200 | [diff] [blame] | 890 | 	unsigned long index; | 
| Stefan Behrens | ff023aa | 2012-11-06 11:43:11 +0100 | [diff] [blame] | 891 | 	struct scrub_fixup_nodatasum *fixup = fixup_ctx; | 
| Jan Schmidt | 0ef8e45 | 2011-06-13 20:04:15 +0200 | [diff] [blame] | 892 | 	int ret; | 
| Jan Schmidt | 5da6fcb | 2011-08-04 18:11:04 +0200 | [diff] [blame] | 893 | 	int corrected = 0; | 
| Jan Schmidt | 0ef8e45 | 2011-06-13 20:04:15 +0200 | [diff] [blame] | 894 | 	struct btrfs_key key; | 
| Jan Schmidt | 5da6fcb | 2011-08-04 18:11:04 +0200 | [diff] [blame] | 895 | 	struct inode *inode = NULL; | 
| Liu Bo | 6f1c360 | 2013-01-29 03:22:10 +0000 | [diff] [blame] | 896 | 	struct btrfs_fs_info *fs_info; | 
| Jan Schmidt | 0ef8e45 | 2011-06-13 20:04:15 +0200 | [diff] [blame] | 897 | 	u64 end = offset + PAGE_SIZE - 1; | 
 | 898 | 	struct btrfs_root *local_root; | 
| Liu Bo | 6f1c360 | 2013-01-29 03:22:10 +0000 | [diff] [blame] | 899 | 	int srcu_index; | 
| Jan Schmidt | 0ef8e45 | 2011-06-13 20:04:15 +0200 | [diff] [blame] | 900 |  | 
 | 901 | 	key.objectid = root; | 
 | 902 | 	key.type = BTRFS_ROOT_ITEM_KEY; | 
 | 903 | 	key.offset = (u64)-1; | 
| Liu Bo | 6f1c360 | 2013-01-29 03:22:10 +0000 | [diff] [blame] | 904 |  | 
 | 905 | 	fs_info = fixup->root->fs_info; | 
 | 906 | 	srcu_index = srcu_read_lock(&fs_info->subvol_srcu); | 
 | 907 |  | 
 | 908 | 	local_root = btrfs_read_fs_root_no_name(fs_info, &key); | 
 | 909 | 	if (IS_ERR(local_root)) { | 
 | 910 | 		srcu_read_unlock(&fs_info->subvol_srcu, srcu_index); | 
| Jan Schmidt | 0ef8e45 | 2011-06-13 20:04:15 +0200 | [diff] [blame] | 911 | 		return PTR_ERR(local_root); | 
| Liu Bo | 6f1c360 | 2013-01-29 03:22:10 +0000 | [diff] [blame] | 912 | 	} | 
| Jan Schmidt | 0ef8e45 | 2011-06-13 20:04:15 +0200 | [diff] [blame] | 913 |  | 
 | 914 | 	key.type = BTRFS_INODE_ITEM_KEY; | 
 | 915 | 	key.objectid = inum; | 
 | 916 | 	key.offset = 0; | 
| Liu Bo | 6f1c360 | 2013-01-29 03:22:10 +0000 | [diff] [blame] | 917 | 	inode = btrfs_iget(fs_info->sb, &key, local_root, NULL); | 
 | 918 | 	srcu_read_unlock(&fs_info->subvol_srcu, srcu_index); | 
| Jan Schmidt | 0ef8e45 | 2011-06-13 20:04:15 +0200 | [diff] [blame] | 919 | 	if (IS_ERR(inode)) | 
 | 920 | 		return PTR_ERR(inode); | 
 | 921 |  | 
| Kirill A. Shutemov | 09cbfea | 2016-04-01 15:29:47 +0300 | [diff] [blame] | 922 | 	index = offset >> PAGE_SHIFT; | 
| Jan Schmidt | 0ef8e45 | 2011-06-13 20:04:15 +0200 | [diff] [blame] | 923 |  | 
 | 924 | 	page = find_or_create_page(inode->i_mapping, index, GFP_NOFS); | 
| Jan Schmidt | 5da6fcb | 2011-08-04 18:11:04 +0200 | [diff] [blame] | 925 | 	if (!page) { | 
 | 926 | 		ret = -ENOMEM; | 
 | 927 | 		goto out; | 
 | 928 | 	} | 
| Jan Schmidt | 0ef8e45 | 2011-06-13 20:04:15 +0200 | [diff] [blame] | 929 |  | 
| Jan Schmidt | 5da6fcb | 2011-08-04 18:11:04 +0200 | [diff] [blame] | 930 | 	if (PageUptodate(page)) { | 
| Jan Schmidt | 5da6fcb | 2011-08-04 18:11:04 +0200 | [diff] [blame] | 931 | 		if (PageDirty(page)) { | 
 | 932 | 			/* | 
 | 933 | 			 * we need to write the data to the defective sector. the | 
 | 934 | 			 * data that was in that sector is not in memory, | 
 | 935 | 			 * because the page was modified. we must not write the | 
 | 936 | 			 * modified page to that sector. | 
 | 937 | 			 * | 
 | 938 | 			 * TODO: what could be done here: wait for the delalloc | 
 | 939 | 			 *       runner to write out that page (might involve | 
 | 940 | 			 *       COW) and see whether the sector is still | 
 | 941 | 			 *       referenced afterwards. | 
 | 942 | 			 * | 
 | 943 | 			 * For the time being, we'll treat this error as | 
 | 944 | 			 * uncorrectable, although there is a chance that a | 
 | 945 | 			 * later scrub will find the bad sector again at a time | 
 | 946 | 			 * when there is no dirty page in memory. | 
 | 947 | 			 */ | 
 | 948 | 			ret = -EIO; | 
 | 949 | 			goto out; | 
 | 950 | 		} | 
| Josef Bacik | 6ec656b | 2017-05-05 11:57:14 -0400 | [diff] [blame] | 951 | 		ret = repair_io_failure(fs_info, inum, offset, PAGE_SIZE, | 
| Jan Schmidt | 5da6fcb | 2011-08-04 18:11:04 +0200 | [diff] [blame] | 952 | 					fixup->logical, page, | 
| Miao Xie | ffdd201 | 2014-09-12 18:44:00 +0800 | [diff] [blame] | 953 | 					offset - page_offset(page), | 
| Jan Schmidt | 5da6fcb | 2011-08-04 18:11:04 +0200 | [diff] [blame] | 954 | 					fixup->mirror_num); | 
 | 955 | 		unlock_page(page); | 
 | 956 | 		corrected = !ret; | 
 | 957 | 	} else { | 
 | 958 | 		/* | 
 | 959 | 		 * we need to get good data first. the general readpage path | 
 | 960 | 		 * will call repair_io_failure for us; we just have to make | 
 | 961 | 		 * sure we read the bad mirror. | 
 | 962 | 		 */ | 
 | 963 | 		ret = set_extent_bits(&BTRFS_I(inode)->io_tree, offset, end, | 
| David Sterba | ceeb0ae | 2016-04-26 23:54:39 +0200 | [diff] [blame] | 964 | 					EXTENT_DAMAGED); | 
| Jan Schmidt | 5da6fcb | 2011-08-04 18:11:04 +0200 | [diff] [blame] | 965 | 		if (ret) { | 
 | 966 | 			/* set_extent_bits should give proper error */ | 
 | 967 | 			WARN_ON(ret > 0); | 
 | 968 | 			if (ret > 0) | 
 | 969 | 				ret = -EFAULT; | 
 | 970 | 			goto out; | 
 | 971 | 		} | 
| Jan Schmidt | 0ef8e45 | 2011-06-13 20:04:15 +0200 | [diff] [blame] | 972 |  | 
| Jan Schmidt | 5da6fcb | 2011-08-04 18:11:04 +0200 | [diff] [blame] | 973 | 		ret = extent_read_full_page(&BTRFS_I(inode)->io_tree, page, | 
 | 974 | 						btrfs_get_extent, | 
 | 975 | 						fixup->mirror_num); | 
 | 976 | 		wait_on_page_locked(page); | 
| Jan Schmidt | 0ef8e45 | 2011-06-13 20:04:15 +0200 | [diff] [blame] | 977 |  | 
| Jan Schmidt | 5da6fcb | 2011-08-04 18:11:04 +0200 | [diff] [blame] | 978 | 		corrected = !test_range_bit(&BTRFS_I(inode)->io_tree, offset, | 
 | 979 | 						end, EXTENT_DAMAGED, 0, NULL); | 
 | 980 | 		if (!corrected) | 
 | 981 | 			clear_extent_bits(&BTRFS_I(inode)->io_tree, offset, end, | 
| David Sterba | 9116621 | 2016-04-26 23:54:39 +0200 | [diff] [blame] | 982 | 						EXTENT_DAMAGED); | 
| Jan Schmidt | 5da6fcb | 2011-08-04 18:11:04 +0200 | [diff] [blame] | 983 | 	} | 
 | 984 |  | 
 | 985 | out: | 
 | 986 | 	if (page) | 
 | 987 | 		put_page(page); | 
| Tobias Klauser | 7fb18a0 | 2014-04-25 14:58:05 +0200 | [diff] [blame] | 988 |  | 
 | 989 | 	iput(inode); | 
| Jan Schmidt | 0ef8e45 | 2011-06-13 20:04:15 +0200 | [diff] [blame] | 990 |  | 
 | 991 | 	if (ret < 0) | 
 | 992 | 		return ret; | 
 | 993 |  | 
 | 994 | 	if (ret == 0 && corrected) { | 
 | 995 | 		/* | 
 | 996 | 		 * we only need to call readpage for one of the inodes belonging | 
 | 997 | 		 * to this extent, so make iterate_extent_inodes stop | 
 | 998 | 		 */ | 
 | 999 | 		return 1; | 
 | 1000 | 	} | 
 | 1001 |  | 
 | 1002 | 	return -EIO; | 
 | 1003 | } | 
 | 1004 |  | 
 | 1005 | static void scrub_fixup_nodatasum(struct btrfs_work *work) | 
 | 1006 | { | 
| Jeff Mahoney | 0b246af | 2016-06-22 18:54:23 -0400 | [diff] [blame] | 1007 | 	struct btrfs_fs_info *fs_info; | 
| Jan Schmidt | 0ef8e45 | 2011-06-13 20:04:15 +0200 | [diff] [blame] | 1008 | 	int ret; | 
 | 1009 | 	struct scrub_fixup_nodatasum *fixup; | 
| Stefan Behrens | d9d181c | 2012-11-02 09:58:09 +0100 | [diff] [blame] | 1010 | 	struct scrub_ctx *sctx; | 
| Jan Schmidt | 0ef8e45 | 2011-06-13 20:04:15 +0200 | [diff] [blame] | 1011 | 	struct btrfs_trans_handle *trans = NULL; | 
| Jan Schmidt | 0ef8e45 | 2011-06-13 20:04:15 +0200 | [diff] [blame] | 1012 | 	struct btrfs_path *path; | 
 | 1013 | 	int uncorrectable = 0; | 
 | 1014 |  | 
 | 1015 | 	fixup = container_of(work, struct scrub_fixup_nodatasum, work); | 
| Stefan Behrens | d9d181c | 2012-11-02 09:58:09 +0100 | [diff] [blame] | 1016 | 	sctx = fixup->sctx; | 
| Jeff Mahoney | 0b246af | 2016-06-22 18:54:23 -0400 | [diff] [blame] | 1017 | 	fs_info = fixup->root->fs_info; | 
| Jan Schmidt | 0ef8e45 | 2011-06-13 20:04:15 +0200 | [diff] [blame] | 1018 |  | 
 | 1019 | 	path = btrfs_alloc_path(); | 
 | 1020 | 	if (!path) { | 
| Stefan Behrens | d9d181c | 2012-11-02 09:58:09 +0100 | [diff] [blame] | 1021 | 		spin_lock(&sctx->stat_lock); | 
 | 1022 | 		++sctx->stat.malloc_errors; | 
 | 1023 | 		spin_unlock(&sctx->stat_lock); | 
| Jan Schmidt | 0ef8e45 | 2011-06-13 20:04:15 +0200 | [diff] [blame] | 1024 | 		uncorrectable = 1; | 
 | 1025 | 		goto out; | 
 | 1026 | 	} | 
 | 1027 |  | 
 | 1028 | 	trans = btrfs_join_transaction(fixup->root); | 
 | 1029 | 	if (IS_ERR(trans)) { | 
 | 1030 | 		uncorrectable = 1; | 
 | 1031 | 		goto out; | 
 | 1032 | 	} | 
 | 1033 |  | 
 | 1034 | 	/* | 
 | 1035 | 	 * the idea is to trigger a regular read through the standard path. we | 
 | 1036 | 	 * read a page from the (failed) logical address by specifying the | 
 | 1037 | 	 * corresponding copy number (mirror_num) of the failed sector. thus, | 
 | 1038 | 	 * that readpage is expected to fail. | 
 | 1039 | 	 * that is the point where on-the-fly error correction kicks in | 
 | 1040 | 	 * (once the read finishes) and rewrites the failed sector if a good | 
 | 1041 | 	 * copy can be found. | 
 | 1042 | 	 */ | 
| Jeff Mahoney | 0b246af | 2016-06-22 18:54:23 -0400 | [diff] [blame] | 1043 | 	ret = iterate_inodes_from_logical(fixup->logical, fs_info, path, | 
 | 1044 | 					  scrub_fixup_readpage, fixup); | 
| Jan Schmidt | 0ef8e45 | 2011-06-13 20:04:15 +0200 | [diff] [blame] | 1045 | 	if (ret < 0) { | 
 | 1046 | 		uncorrectable = 1; | 
 | 1047 | 		goto out; | 
 | 1048 | 	} | 
 | 1049 | 	WARN_ON(ret != 1); | 
 | 1050 |  | 
| Stefan Behrens | d9d181c | 2012-11-02 09:58:09 +0100 | [diff] [blame] | 1051 | 	spin_lock(&sctx->stat_lock); | 
 | 1052 | 	++sctx->stat.corrected_errors; | 
 | 1053 | 	spin_unlock(&sctx->stat_lock); | 
| Jan Schmidt | 0ef8e45 | 2011-06-13 20:04:15 +0200 | [diff] [blame] | 1054 |  | 
 | 1055 | out: | 
 | 1056 | 	if (trans && !IS_ERR(trans)) | 
| Jeff Mahoney | 3a45bb2 | 2016-09-09 21:39:03 -0400 | [diff] [blame] | 1057 | 		btrfs_end_transaction(trans); | 
| Jan Schmidt | 0ef8e45 | 2011-06-13 20:04:15 +0200 | [diff] [blame] | 1058 | 	if (uncorrectable) { | 
| Stefan Behrens | d9d181c | 2012-11-02 09:58:09 +0100 | [diff] [blame] | 1059 | 		spin_lock(&sctx->stat_lock); | 
 | 1060 | 		++sctx->stat.uncorrectable_errors; | 
 | 1061 | 		spin_unlock(&sctx->stat_lock); | 
| Stefan Behrens | ff023aa | 2012-11-06 11:43:11 +0100 | [diff] [blame] | 1062 | 		btrfs_dev_replace_stats_inc( | 
| Jeff Mahoney | 0b246af | 2016-06-22 18:54:23 -0400 | [diff] [blame] | 1063 | 			&fs_info->dev_replace.num_uncorrectable_read_errors); | 
 | 1064 | 		btrfs_err_rl_in_rcu(fs_info, | 
| David Sterba | b14af3b | 2015-10-08 10:43:10 +0200 | [diff] [blame] | 1065 | 		    "unable to fixup (nodatasum) error at logical %llu on dev %s", | 
| Geert Uytterhoeven | c1c9ff7 | 2013-08-20 13:20:07 +0200 | [diff] [blame] | 1066 | 			fixup->logical, rcu_str_deref(fixup->dev->name)); | 
| Jan Schmidt | 0ef8e45 | 2011-06-13 20:04:15 +0200 | [diff] [blame] | 1067 | 	} | 
 | 1068 |  | 
 | 1069 | 	btrfs_free_path(path); | 
 | 1070 | 	kfree(fixup); | 
 | 1071 |  | 
| Stefan Behrens | b6bfebc | 2012-11-02 16:44:58 +0100 | [diff] [blame] | 1072 | 	scrub_pending_trans_workers_dec(sctx); | 
| Jan Schmidt | 0ef8e45 | 2011-06-13 20:04:15 +0200 | [diff] [blame] | 1073 | } | 
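
The fixup path above relies on a simple callback contract: the backref walk invokes the supplied callback once per inode that references the bad logical address, and a positive return value stops the iteration, which is why scrub_fixup_readpage() returns 1 after a successful repair. A minimal userspace sketch of that stop-on-positive shape, assuming only standard C; the names below are illustrative stand-ins, not the kernel API:

	#include <stdio.h>

	typedef int (*inode_cb)(unsigned long inum, void *ctx);

	static int iterate_inodes_demo(const unsigned long *inums, int count,
				       inode_cb cb, void *ctx)
	{
		int i, ret = 0;

		for (i = 0; i < count; i++) {
			ret = cb(inums[i], ctx);
			if (ret)	/* < 0: error, > 0: callback asks to stop */
				break;
		}
		return ret;
	}

	static int fixup_one_inode(unsigned long inum, void *ctx)
	{
		(void)ctx;
		printf("triggering repair read via inode %lu\n", inum);
		return 1;	/* one successful readpage is enough */
	}

	int main(void)
	{
		unsigned long inums[] = { 257, 258, 300 };

		return iterate_inodes_demo(inums, 3, fixup_one_inode, NULL) == 1 ? 0 : 1;
	}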
 | 1074 |  | 
| Miao Xie | af8e2d1 | 2014-10-23 14:42:50 +0800 | [diff] [blame] | 1075 | static inline void scrub_get_recover(struct scrub_recover *recover) | 
 | 1076 | { | 
| Elena Reshetova | 6f61501 | 2017-03-03 10:55:21 +0200 | [diff] [blame] | 1077 | 	refcount_inc(&recover->refs); | 
| Miao Xie | af8e2d1 | 2014-10-23 14:42:50 +0800 | [diff] [blame] | 1078 | } | 
 | 1079 |  | 
| Qu Wenruo | e501bfe | 2017-03-29 09:33:22 +0800 | [diff] [blame] | 1080 | static inline void scrub_put_recover(struct btrfs_fs_info *fs_info, | 
 | 1081 | 				     struct scrub_recover *recover) | 
| Miao Xie | af8e2d1 | 2014-10-23 14:42:50 +0800 | [diff] [blame] | 1082 | { | 
| Elena Reshetova | 6f61501 | 2017-03-03 10:55:21 +0200 | [diff] [blame] | 1083 | 	if (refcount_dec_and_test(&recover->refs)) { | 
| Qu Wenruo | e501bfe | 2017-03-29 09:33:22 +0800 | [diff] [blame] | 1084 | 		btrfs_bio_counter_dec(fs_info); | 
| Zhao Lei | 6e9606d | 2015-01-20 15:11:34 +0800 | [diff] [blame] | 1085 | 		btrfs_put_bbio(recover->bbio); | 
| Miao Xie | af8e2d1 | 2014-10-23 14:42:50 +0800 | [diff] [blame] | 1086 | 		kfree(recover); | 
 | 1087 | 	} | 
 | 1088 | } | 
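
The scrub_recover object is shared by all pages built from one mapping, so its lifetime follows the usual get/put rule: the last put releases the mapping and drops the bio counter. A self-contained userspace analogue of that rule, assuming only C11 atomics; the names are illustrative, not kernel API:

	#include <stdatomic.h>
	#include <stdlib.h>

	struct recover_demo {
		atomic_int refs;	/* plays the role of recover->refs */
		void *mapping;		/* stands in for recover->bbio */
	};

	static struct recover_demo *recover_alloc(void)
	{
		struct recover_demo *r = calloc(1, sizeof(*r));

		if (r)
			atomic_init(&r->refs, 1);	/* creator holds one reference */
		return r;
	}

	static void recover_get(struct recover_demo *r)
	{
		atomic_fetch_add(&r->refs, 1);		/* one reference per user */
	}

	static void recover_put(struct recover_demo *r)
	{
		/* the last put frees, mirroring scrub_put_recover() above */
		if (atomic_fetch_sub(&r->refs, 1) == 1) {
			free(r->mapping);
			free(r);
		}
	}

	int main(void)
	{
		struct recover_demo *r = recover_alloc();

		if (!r)
			return 1;
		recover_get(r);		/* e.g. a page takes a reference */
		recover_put(r);		/* the page drops it ... */
		recover_put(r);		/* ... and the setup path drops the last one */
		return 0;
	}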
 | 1089 |  | 
| Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 1090 | /* | 
| Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 1091 |  * scrub_handle_errored_block gets called when either verification of the | 
 | 1092 |  * pages failed or the bio failed to read, e.g. with EIO. In the latter | 
 | 1093 |  * case, this function handles all pages in the bio, even though only one | 
 | 1094 |  * may be bad. | 
 | 1095 |  * The goal of this function is to repair the errored block by using the | 
 | 1096 |  * contents of one of the mirrors. | 
| Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 1097 |  */ | 
| Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 1098 | static int scrub_handle_errored_block(struct scrub_block *sblock_to_check) | 
| Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 1099 | { | 
| Stefan Behrens | d9d181c | 2012-11-02 09:58:09 +0100 | [diff] [blame] | 1100 | 	struct scrub_ctx *sctx = sblock_to_check->sctx; | 
| Stefan Behrens | a36cf8b | 2012-11-02 13:26:57 +0100 | [diff] [blame] | 1101 | 	struct btrfs_device *dev; | 
| Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 1102 | 	struct btrfs_fs_info *fs_info; | 
| Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 1103 | 	u64 length; | 
| Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 1104 | 	u64 logical; | 
| Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 1105 | 	unsigned int failed_mirror_index; | 
 | 1106 | 	unsigned int is_metadata; | 
 | 1107 | 	unsigned int have_csum; | 
| Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 1108 | 	struct scrub_block *sblocks_for_recheck; /* holds one for each mirror */ | 
 | 1109 | 	struct scrub_block *sblock_bad; | 
| Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 1110 | 	int ret; | 
| Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 1111 | 	int mirror_index; | 
 | 1112 | 	int page_num; | 
 | 1113 | 	int success; | 
| Qu Wenruo | 28d70e2 | 2017-04-14 08:35:55 +0800 | [diff] [blame] | 1114 | 	bool full_stripe_locked; | 
| Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 1115 | 	static DEFINE_RATELIMIT_STATE(_rs, DEFAULT_RATELIMIT_INTERVAL, | 
 | 1116 | 				      DEFAULT_RATELIMIT_BURST); | 
| Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 1117 |  | 
| Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 1118 | 	BUG_ON(sblock_to_check->page_count < 1); | 
| Jeff Mahoney | fb45625 | 2016-06-22 18:54:56 -0400 | [diff] [blame] | 1119 | 	fs_info = sctx->fs_info; | 
| Stefan Behrens | 4ded4f6 | 2012-11-14 18:57:29 +0000 | [diff] [blame] | 1120 | 	if (sblock_to_check->pagev[0]->flags & BTRFS_EXTENT_FLAG_SUPER) { | 
 | 1121 | 		/* | 
 | 1122 | 		 * if we find an error in a super block, we just report it; | 
 | 1123 | 		 * super blocks get rewritten with the next transaction | 
 | 1124 | 		 * commit anyway | 
 | 1125 | 		 */ | 
 | 1126 | 		spin_lock(&sctx->stat_lock); | 
 | 1127 | 		++sctx->stat.super_errors; | 
 | 1128 | 		spin_unlock(&sctx->stat_lock); | 
 | 1129 | 		return 0; | 
 | 1130 | 	} | 
| Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 1131 | 	length = sblock_to_check->page_count * PAGE_SIZE; | 
| Stefan Behrens | 7a9e998 | 2012-11-02 14:58:04 +0100 | [diff] [blame] | 1132 | 	logical = sblock_to_check->pagev[0]->logical; | 
| Stefan Behrens | 7a9e998 | 2012-11-02 14:58:04 +0100 | [diff] [blame] | 1133 | 	BUG_ON(sblock_to_check->pagev[0]->mirror_num < 1); | 
 | 1134 | 	failed_mirror_index = sblock_to_check->pagev[0]->mirror_num - 1; | 
 | 1135 | 	is_metadata = !(sblock_to_check->pagev[0]->flags & | 
| Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 1136 | 			BTRFS_EXTENT_FLAG_DATA); | 
| Stefan Behrens | 7a9e998 | 2012-11-02 14:58:04 +0100 | [diff] [blame] | 1137 | 	have_csum = sblock_to_check->pagev[0]->have_csum; | 
| Stefan Behrens | 7a9e998 | 2012-11-02 14:58:04 +0100 | [diff] [blame] | 1138 | 	dev = sblock_to_check->pagev[0]->dev; | 
| Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 1139 |  | 
| Qu Wenruo | 28d70e2 | 2017-04-14 08:35:55 +0800 | [diff] [blame] | 1140 | 	/* | 
 | 1141 | 	 * For RAID5/6, a race can happen between the scrub threads of | 
 | 1142 | 	 * different devices. When data is corrupted, the parity and data | 
 | 1143 | 	 * threads will both try to recover it. | 
 | 1144 | 	 * The race can lead to doubly counted csum errors, or even to an | 
 | 1145 | 	 * unrecoverable error. | 
 | 1146 | 	 */ | 
 | 1147 | 	ret = lock_full_stripe(fs_info, logical, &full_stripe_locked); | 
 | 1148 | 	if (ret < 0) { | 
 | 1149 | 		spin_lock(&sctx->stat_lock); | 
 | 1150 | 		if (ret == -ENOMEM) | 
 | 1151 | 			sctx->stat.malloc_errors++; | 
 | 1152 | 		sctx->stat.read_errors++; | 
 | 1153 | 		sctx->stat.uncorrectable_errors++; | 
 | 1154 | 		spin_unlock(&sctx->stat_lock); | 
 | 1155 | 		return ret; | 
 | 1156 | 	} | 
 | 1157 |  | 
| Stefan Behrens | ff023aa | 2012-11-06 11:43:11 +0100 | [diff] [blame] | 1158 | 	if (sctx->is_dev_replace && !is_metadata && !have_csum) { | 
 | 1159 | 		sblocks_for_recheck = NULL; | 
 | 1160 | 		goto nodatasum_case; | 
 | 1161 | 	} | 
 | 1162 |  | 
| Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 1163 | 	/* | 
 | 1164 | 	 * read all mirrors one after the other. This includes | 
 | 1165 | 	 * re-reading the extent or metadata block that failed (the | 
 | 1166 | 	 * reason this fixup code was called), this time page by | 
 | 1167 | 	 * page, in order to know which pages caused I/O errors and | 
 | 1168 | 	 * which ones are good (for all mirrors). | 
 | 1169 | 	 * The goal is to handle the situation when more than one | 
 | 1170 | 	 * mirror contains I/O errors, but the errors do not | 
 | 1171 | 	 * overlap, i.e. the data can be repaired by selecting the | 
 | 1172 | 	 * pages from those mirrors without I/O error on the | 
 | 1173 | 	 * particular pages. One example (with blocks >= 2 * PAGE_SIZE) | 
 | 1174 | 	 * would be that mirror #1 has an I/O error on the first page, | 
 | 1175 | 	 * the second page is good, and mirror #2 has an I/O error on | 
 | 1176 | 	 * the second page, but the first page is good. | 
 | 1177 | 	 * Then the first page of the first mirror can be repaired by | 
 | 1178 | 	 * taking the first page of the second mirror, and the | 
 | 1179 | 	 * second page of the second mirror can be repaired by | 
 | 1180 | 	 * copying the contents of the 2nd page of the 1st mirror. | 
 | 1181 | 	 * One more note: if the pages of one mirror contain I/O | 
 | 1182 | 	 * errors, the checksum cannot be verified. In order to get | 
 | 1183 | 	 * the best data for repairing, the first attempt is to find | 
 | 1184 | 	 * a mirror without I/O errors and with a validated checksum. | 
 | 1185 | 	 * Only if this is not possible, the pages are picked from | 
 | 1186 | 	 * mirrors with I/O errors without considering the checksum. | 
 | 1187 | 	 * If the latter is the case, at the end, the checksum of the | 
 | 1188 | 	 * repaired area is verified in order to correctly maintain | 
 | 1189 | 	 * the statistics. | 
 | 1190 | 	 */ | 
 | 1191 |  | 
| David Sterba | 31e818f | 2015-02-20 18:00:26 +0100 | [diff] [blame] | 1192 | 	sblocks_for_recheck = kcalloc(BTRFS_MAX_MIRRORS, | 
 | 1193 | 				      sizeof(*sblocks_for_recheck), GFP_NOFS); | 
| Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 1194 | 	if (!sblocks_for_recheck) { | 
| Stefan Behrens | d9d181c | 2012-11-02 09:58:09 +0100 | [diff] [blame] | 1195 | 		spin_lock(&sctx->stat_lock); | 
 | 1196 | 		sctx->stat.malloc_errors++; | 
 | 1197 | 		sctx->stat.read_errors++; | 
 | 1198 | 		sctx->stat.uncorrectable_errors++; | 
 | 1199 | 		spin_unlock(&sctx->stat_lock); | 
| Stefan Behrens | a36cf8b | 2012-11-02 13:26:57 +0100 | [diff] [blame] | 1200 | 		btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_READ_ERRS); | 
| Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 1201 | 		goto out; | 
 | 1202 | 	} | 
 | 1203 |  | 
 | 1204 | 	/* setup the context, map the logical blocks and alloc the pages */ | 
| Zhao Lei | be50a8d | 2015-01-20 15:11:42 +0800 | [diff] [blame] | 1205 | 	ret = scrub_setup_recheck_block(sblock_to_check, sblocks_for_recheck); | 
| Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 1206 | 	if (ret) { | 
| Stefan Behrens | d9d181c | 2012-11-02 09:58:09 +0100 | [diff] [blame] | 1207 | 		spin_lock(&sctx->stat_lock); | 
 | 1208 | 		sctx->stat.read_errors++; | 
 | 1209 | 		sctx->stat.uncorrectable_errors++; | 
 | 1210 | 		spin_unlock(&sctx->stat_lock); | 
| Stefan Behrens | a36cf8b | 2012-11-02 13:26:57 +0100 | [diff] [blame] | 1211 | 		btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_READ_ERRS); | 
| Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 1212 | 		goto out; | 
 | 1213 | 	} | 
 | 1214 | 	BUG_ON(failed_mirror_index >= BTRFS_MAX_MIRRORS); | 
 | 1215 | 	sblock_bad = sblocks_for_recheck + failed_mirror_index; | 
 | 1216 |  | 
 | 1217 | 	/* build and submit the bios for the failed mirror, check checksums */ | 
| Zhao Lei | affe4a5 | 2015-08-24 21:32:06 +0800 | [diff] [blame] | 1218 | 	scrub_recheck_block(fs_info, sblock_bad, 1); | 
| Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 1219 |  | 
 | 1220 | 	if (!sblock_bad->header_error && !sblock_bad->checksum_error && | 
 | 1221 | 	    sblock_bad->no_io_error_seen) { | 
 | 1222 | 		/* | 
 | 1223 | 		 * the error disappeared after reading page by page, or | 
 | 1224 | 		 * the area was part of a huge bio and other parts of the | 
 | 1225 | 		 * bio caused I/O errors, or the block layer merged several | 
 | 1226 | 		 * read requests into one and the error is caused by a | 
 | 1227 | 		 * different bio (usually one of the two latter cases is | 
 | 1228 | 		 * the cause) | 
 | 1229 | 		 */ | 
| Stefan Behrens | d9d181c | 2012-11-02 09:58:09 +0100 | [diff] [blame] | 1230 | 		spin_lock(&sctx->stat_lock); | 
 | 1231 | 		sctx->stat.unverified_errors++; | 
| Miao Xie | 5a6ac9e | 2014-11-06 17:20:58 +0800 | [diff] [blame] | 1232 | 		sblock_to_check->data_corrected = 1; | 
| Stefan Behrens | d9d181c | 2012-11-02 09:58:09 +0100 | [diff] [blame] | 1233 | 		spin_unlock(&sctx->stat_lock); | 
| Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 1234 |  | 
| Stefan Behrens | ff023aa | 2012-11-06 11:43:11 +0100 | [diff] [blame] | 1235 | 		if (sctx->is_dev_replace) | 
 | 1236 | 			scrub_write_block_to_dev_replace(sblock_bad); | 
| Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 1237 | 		goto out; | 
 | 1238 | 	} | 
 | 1239 |  | 
 | 1240 | 	if (!sblock_bad->no_io_error_seen) { | 
| Stefan Behrens | d9d181c | 2012-11-02 09:58:09 +0100 | [diff] [blame] | 1241 | 		spin_lock(&sctx->stat_lock); | 
 | 1242 | 		sctx->stat.read_errors++; | 
 | 1243 | 		spin_unlock(&sctx->stat_lock); | 
| Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 1244 | 		if (__ratelimit(&_rs)) | 
 | 1245 | 			scrub_print_warning("i/o error", sblock_to_check); | 
| Stefan Behrens | a36cf8b | 2012-11-02 13:26:57 +0100 | [diff] [blame] | 1246 | 		btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_READ_ERRS); | 
| Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 1247 | 	} else if (sblock_bad->checksum_error) { | 
| Stefan Behrens | d9d181c | 2012-11-02 09:58:09 +0100 | [diff] [blame] | 1248 | 		spin_lock(&sctx->stat_lock); | 
 | 1249 | 		sctx->stat.csum_errors++; | 
 | 1250 | 		spin_unlock(&sctx->stat_lock); | 
| Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 1251 | 		if (__ratelimit(&_rs)) | 
 | 1252 | 			scrub_print_warning("checksum error", sblock_to_check); | 
| Stefan Behrens | a36cf8b | 2012-11-02 13:26:57 +0100 | [diff] [blame] | 1253 | 		btrfs_dev_stat_inc_and_print(dev, | 
| Stefan Behrens | 442a4f6 | 2012-05-25 16:06:08 +0200 | [diff] [blame] | 1254 | 					     BTRFS_DEV_STAT_CORRUPTION_ERRS); | 
| Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 1255 | 	} else if (sblock_bad->header_error) { | 
| Stefan Behrens | d9d181c | 2012-11-02 09:58:09 +0100 | [diff] [blame] | 1256 | 		spin_lock(&sctx->stat_lock); | 
 | 1257 | 		sctx->stat.verify_errors++; | 
 | 1258 | 		spin_unlock(&sctx->stat_lock); | 
| Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 1259 | 		if (__ratelimit(&_rs)) | 
 | 1260 | 			scrub_print_warning("checksum/header error", | 
 | 1261 | 					    sblock_to_check); | 
| Stefan Behrens | 442a4f6 | 2012-05-25 16:06:08 +0200 | [diff] [blame] | 1262 | 		if (sblock_bad->generation_error) | 
| Stefan Behrens | a36cf8b | 2012-11-02 13:26:57 +0100 | [diff] [blame] | 1263 | 			btrfs_dev_stat_inc_and_print(dev, | 
| Stefan Behrens | 442a4f6 | 2012-05-25 16:06:08 +0200 | [diff] [blame] | 1264 | 				BTRFS_DEV_STAT_GENERATION_ERRS); | 
 | 1265 | 		else | 
| Stefan Behrens | a36cf8b | 2012-11-02 13:26:57 +0100 | [diff] [blame] | 1266 | 			btrfs_dev_stat_inc_and_print(dev, | 
| Stefan Behrens | 442a4f6 | 2012-05-25 16:06:08 +0200 | [diff] [blame] | 1267 | 				BTRFS_DEV_STAT_CORRUPTION_ERRS); | 
| Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 1268 | 	} | 
 | 1269 |  | 
| Ilya Dryomov | 33ef30a | 2013-11-03 19:06:38 +0200 | [diff] [blame] | 1270 | 	if (sctx->readonly) { | 
 | 1271 | 		ASSERT(!sctx->is_dev_replace); | 
 | 1272 | 		goto out; | 
 | 1273 | 	} | 
| Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 1274 |  | 
 | 1275 | 	if (!is_metadata && !have_csum) { | 
 | 1276 | 		struct scrub_fixup_nodatasum *fixup_nodatasum; | 
 | 1277 |  | 
| Stefan Behrens | ff023aa | 2012-11-06 11:43:11 +0100 | [diff] [blame] | 1278 | 		WARN_ON(sctx->is_dev_replace); | 
 | 1279 |  | 
| Zhao Lei | b25c94c | 2015-01-20 15:11:35 +0800 | [diff] [blame] | 1280 | nodatasum_case: | 
 | 1281 |  | 
| Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 1282 | 		/* | 
 | 1283 | 		 * !is_metadata and !have_csum means that the data | 
| Nicholas D Steeves | 0132761 | 2016-05-19 21:18:45 -0400 | [diff] [blame] | 1284 | 		 * might not be COWed, i.e. it might be modified | 
| Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 1285 | 		 * concurrently. The general strategy of working on the | 
 | 1286 | 		 * commit root does not help when COW is not | 
 | 1287 | 		 * used. | 
 | 1288 | 		 */ | 
 | 1289 | 		fixup_nodatasum = kzalloc(sizeof(*fixup_nodatasum), GFP_NOFS); | 
 | 1290 | 		if (!fixup_nodatasum) | 
 | 1291 | 			goto did_not_correct_error; | 
| Stefan Behrens | d9d181c | 2012-11-02 09:58:09 +0100 | [diff] [blame] | 1292 | 		fixup_nodatasum->sctx = sctx; | 
| Stefan Behrens | a36cf8b | 2012-11-02 13:26:57 +0100 | [diff] [blame] | 1293 | 		fixup_nodatasum->dev = dev; | 
| Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 1294 | 		fixup_nodatasum->logical = logical; | 
 | 1295 | 		fixup_nodatasum->root = fs_info->extent_root; | 
 | 1296 | 		fixup_nodatasum->mirror_num = failed_mirror_index + 1; | 
| Stefan Behrens | b6bfebc | 2012-11-02 16:44:58 +0100 | [diff] [blame] | 1297 | 		scrub_pending_trans_workers_inc(sctx); | 
| Liu Bo | 9e0af23 | 2014-08-15 23:36:53 +0800 | [diff] [blame] | 1298 | 		btrfs_init_work(&fixup_nodatasum->work, btrfs_scrub_helper, | 
 | 1299 | 				scrub_fixup_nodatasum, NULL, NULL); | 
| Qu Wenruo | 0339ef2 | 2014-02-28 10:46:17 +0800 | [diff] [blame] | 1300 | 		btrfs_queue_work(fs_info->scrub_workers, | 
 | 1301 | 				 &fixup_nodatasum->work); | 
| Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 1302 | 		goto out; | 
 | 1303 | 	} | 
| Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 1304 |  | 
 | 1305 | 	/* | 
 | 1306 | 	 * now build and submit the bios for the other mirrors, check | 
| Stefan Behrens | cb2ced7 | 2012-11-02 16:14:21 +0100 | [diff] [blame] | 1307 | 	 * checksums. | 
 | 1308 | 	 * First try to pick the mirror which is completely without I/O | 
| Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 1309 | 	 * errors and also does not have a checksum error. | 
 | 1310 | 	 * If one is found, and if a checksum is present, the full block | 
 | 1311 | 	 * that is known to contain an error is rewritten. Afterwards | 
 | 1312 | 	 * the block is known to be corrected. | 
 | 1313 | 	 * If a mirror is found which is completely correct, and no | 
 | 1314 | 	 * checksum is present, only those pages are rewritten that had | 
 | 1315 | 	 * an I/O error in the block to be repaired, since it cannot be | 
 | 1316 | 	 * determined which copy of the other pages is better (and it | 
 | 1317 | 	 * could happen otherwise that a correct page would be | 
 | 1318 | 	 * overwritten by a bad one). | 
 | 1319 | 	 */ | 
 | 1320 | 	for (mirror_index = 0; | 
 | 1321 | 	     mirror_index < BTRFS_MAX_MIRRORS && | 
 | 1322 | 	     sblocks_for_recheck[mirror_index].page_count > 0; | 
 | 1323 | 	     mirror_index++) { | 
| Stefan Behrens | cb2ced7 | 2012-11-02 16:14:21 +0100 | [diff] [blame] | 1324 | 		struct scrub_block *sblock_other; | 
| Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 1325 |  | 
| Stefan Behrens | cb2ced7 | 2012-11-02 16:14:21 +0100 | [diff] [blame] | 1326 | 		if (mirror_index == failed_mirror_index) | 
 | 1327 | 			continue; | 
 | 1328 | 		sblock_other = sblocks_for_recheck + mirror_index; | 
 | 1329 |  | 
 | 1330 | 		/* build and submit the bios, check checksums */ | 
| Zhao Lei | affe4a5 | 2015-08-24 21:32:06 +0800 | [diff] [blame] | 1331 | 		scrub_recheck_block(fs_info, sblock_other, 0); | 
| Stefan Behrens | 34f5c8e | 2012-11-02 16:16:26 +0100 | [diff] [blame] | 1332 |  | 
 | 1333 | 		if (!sblock_other->header_error && | 
| Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 1334 | 		    !sblock_other->checksum_error && | 
 | 1335 | 		    sblock_other->no_io_error_seen) { | 
| Stefan Behrens | ff023aa | 2012-11-06 11:43:11 +0100 | [diff] [blame] | 1336 | 			if (sctx->is_dev_replace) { | 
 | 1337 | 				scrub_write_block_to_dev_replace(sblock_other); | 
| Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 1338 | 				goto corrected_error; | 
| Zhao Lei | 114ab50 | 2015-01-20 15:11:36 +0800 | [diff] [blame] | 1339 | 			} else { | 
 | 1340 | 				ret = scrub_repair_block_from_good_copy( | 
 | 1341 | 						sblock_bad, sblock_other); | 
 | 1342 | 				if (!ret) | 
 | 1343 | 					goto corrected_error; | 
 | 1344 | 			} | 
| Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 1345 | 		} | 
| Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 1346 | 	} | 
 | 1347 |  | 
| Zhao Lei | b968fed | 2015-01-20 15:11:41 +0800 | [diff] [blame] | 1348 | 	if (sblock_bad->no_io_error_seen && !sctx->is_dev_replace) | 
 | 1349 | 		goto did_not_correct_error; | 
| Stefan Behrens | ff023aa | 2012-11-06 11:43:11 +0100 | [diff] [blame] | 1350 |  | 
 | 1351 | 	/* | 
| Stefan Behrens | ff023aa | 2012-11-06 11:43:11 +0100 | [diff] [blame] | 1352 | 	 * In case of I/O errors in the area that is supposed to be | 
| Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 1353 | 	 * repaired, continue by picking good copies of those pages. | 
 | 1354 | 	 * Select the good pages from mirrors to rewrite bad pages from | 
 | 1355 | 	 * the area to fix. Afterwards verify the checksum of the block | 
 | 1356 | 	 * that is supposed to be repaired. This verification step is | 
 | 1357 | 	 * only done for statistics counting and for the final scrub | 
 | 1358 | 	 * report on whether errors remain. | 
 | 1359 | 	 * A perfect algorithm could make use of the checksum and try | 
 | 1360 | 	 * all possible combinations of pages from the different mirrors | 
 | 1361 | 	 * until the checksum verification succeeds. For example, when | 
 | 1362 | 	 * the 2nd page of mirror #1 faces I/O errors, and the 2nd page | 
 | 1363 | 	 * of mirror #2 is readable but the final checksum test fails, | 
 | 1364 | 	 * then the 2nd page of mirror #3 could be tried to see whether | 
| Nicholas D Steeves | 0132761 | 2016-05-19 21:18:45 -0400 | [diff] [blame] | 1365 | 	 * the final checksum then succeeds. But this would be a rare | 
 | 1366 | 	 * exception and is therefore not implemented. At least this | 
 | 1367 | 	 * avoids overwriting the good copy. | 
 | 1368 | 	 * A more useful improvement would be to pick the sectors | 
 | 1369 | 	 * without I/O error based on sector sizes (512 bytes on legacy | 
 | 1370 | 	 * disks) instead of on PAGE_SIZE. Then maybe 512 bytes of one | 
 | 1371 | 	 * mirror could be repaired by taking 512 bytes of a different | 
 | 1372 | 	 * mirror, even if other 512-byte sectors in the same PAGE_SIZE | 
 | 1373 | 	 * area are unreadable. | 
 | 1374 | 	 */ | 
| Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 1375 | 	success = 1; | 
| Zhao Lei | b968fed | 2015-01-20 15:11:41 +0800 | [diff] [blame] | 1376 | 	for (page_num = 0; page_num < sblock_bad->page_count; | 
 | 1377 | 	     page_num++) { | 
| Stefan Behrens | 7a9e998 | 2012-11-02 14:58:04 +0100 | [diff] [blame] | 1378 | 		struct scrub_page *page_bad = sblock_bad->pagev[page_num]; | 
| Zhao Lei | b968fed | 2015-01-20 15:11:41 +0800 | [diff] [blame] | 1379 | 		struct scrub_block *sblock_other = NULL; | 
| Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 1380 |  | 
| Zhao Lei | b968fed | 2015-01-20 15:11:41 +0800 | [diff] [blame] | 1381 | 		/* skip no-io-error page in scrub */ | 
 | 1382 | 		if (!page_bad->io_error && !sctx->is_dev_replace) | 
| Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 1383 | 			continue; | 
 | 1384 |  | 
| Zhao Lei | b968fed | 2015-01-20 15:11:41 +0800 | [diff] [blame] | 1385 | 		/* try to find no-io-error page in mirrors */ | 
 | 1386 | 		if (page_bad->io_error) { | 
 | 1387 | 			for (mirror_index = 0; | 
 | 1388 | 			     mirror_index < BTRFS_MAX_MIRRORS && | 
 | 1389 | 			     sblocks_for_recheck[mirror_index].page_count > 0; | 
 | 1390 | 			     mirror_index++) { | 
 | 1391 | 				if (!sblocks_for_recheck[mirror_index]. | 
 | 1392 | 				    pagev[page_num]->io_error) { | 
 | 1393 | 					sblock_other = sblocks_for_recheck + | 
 | 1394 | 						       mirror_index; | 
 | 1395 | 					break; | 
| Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 1396 | 				} | 
| Jan Schmidt | 13db62b | 2011-06-13 19:56:13 +0200 | [diff] [blame] | 1397 | 			} | 
| Zhao Lei | b968fed | 2015-01-20 15:11:41 +0800 | [diff] [blame] | 1398 | 			if (!sblock_other) | 
 | 1399 | 				success = 0; | 
| Jan Schmidt | 13db62b | 2011-06-13 19:56:13 +0200 | [diff] [blame] | 1400 | 		} | 
| Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 1401 |  | 
| Zhao Lei | b968fed | 2015-01-20 15:11:41 +0800 | [diff] [blame] | 1402 | 		if (sctx->is_dev_replace) { | 
 | 1403 | 			/* | 
 | 1404 | 			 * did not find a mirror to fetch the page | 
 | 1405 | 			 * from. scrub_write_page_to_dev_replace() | 
 | 1406 | 			 * handles this case (page->io_error) by | 
 | 1407 | 			 * filling the block with zeros before | 
 | 1408 | 			 * submitting the write request | 
 | 1409 | 			 */ | 
 | 1410 | 			if (!sblock_other) | 
 | 1411 | 				sblock_other = sblock_bad; | 
 | 1412 |  | 
 | 1413 | 			if (scrub_write_page_to_dev_replace(sblock_other, | 
 | 1414 | 							    page_num) != 0) { | 
 | 1415 | 				btrfs_dev_replace_stats_inc( | 
| Jeff Mahoney | 0b246af | 2016-06-22 18:54:23 -0400 | [diff] [blame] | 1416 | 					&fs_info->dev_replace.num_write_errors); | 
| Zhao Lei | b968fed | 2015-01-20 15:11:41 +0800 | [diff] [blame] | 1417 | 				success = 0; | 
 | 1418 | 			} | 
 | 1419 | 		} else if (sblock_other) { | 
 | 1420 | 			ret = scrub_repair_page_from_good_copy(sblock_bad, | 
 | 1421 | 							       sblock_other, | 
 | 1422 | 							       page_num, 0); | 
 | 1423 | 			if (0 == ret) | 
 | 1424 | 				page_bad->io_error = 0; | 
 | 1425 | 			else | 
 | 1426 | 				success = 0; | 
| Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 1427 | 		} | 
 | 1428 | 	} | 
 | 1429 |  | 
| Zhao Lei | b968fed | 2015-01-20 15:11:41 +0800 | [diff] [blame] | 1430 | 	if (success && !sctx->is_dev_replace) { | 
| Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 1431 | 		if (is_metadata || have_csum) { | 
 | 1432 | 			/* | 
 | 1433 | 			 * need to verify the checksum now that all | 
 | 1434 | 			 * sectors on disk are repaired (the write | 
 | 1435 | 			 * request for data to be repaired is on its way). | 
 | 1436 | 			 * Just be lazy and use scrub_recheck_block() | 
 | 1437 | 			 * which re-reads the data before the checksum | 
 | 1438 | 			 * is verified, but most likely the data comes out | 
 | 1439 | 			 * of the page cache. | 
 | 1440 | 			 */ | 
| Zhao Lei | affe4a5 | 2015-08-24 21:32:06 +0800 | [diff] [blame] | 1441 | 			scrub_recheck_block(fs_info, sblock_bad, 1); | 
| Stefan Behrens | 34f5c8e | 2012-11-02 16:16:26 +0100 | [diff] [blame] | 1442 | 			if (!sblock_bad->header_error && | 
| Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 1443 | 			    !sblock_bad->checksum_error && | 
 | 1444 | 			    sblock_bad->no_io_error_seen) | 
 | 1445 | 				goto corrected_error; | 
 | 1446 | 			else | 
 | 1447 | 				goto did_not_correct_error; | 
 | 1448 | 		} else { | 
 | 1449 | corrected_error: | 
| Stefan Behrens | d9d181c | 2012-11-02 09:58:09 +0100 | [diff] [blame] | 1450 | 			spin_lock(&sctx->stat_lock); | 
 | 1451 | 			sctx->stat.corrected_errors++; | 
| Miao Xie | 5a6ac9e | 2014-11-06 17:20:58 +0800 | [diff] [blame] | 1452 | 			sblock_to_check->data_corrected = 1; | 
| Stefan Behrens | d9d181c | 2012-11-02 09:58:09 +0100 | [diff] [blame] | 1453 | 			spin_unlock(&sctx->stat_lock); | 
| David Sterba | b14af3b | 2015-10-08 10:43:10 +0200 | [diff] [blame] | 1454 | 			btrfs_err_rl_in_rcu(fs_info, | 
 | 1455 | 				"fixed up error at logical %llu on dev %s", | 
| Geert Uytterhoeven | c1c9ff7 | 2013-08-20 13:20:07 +0200 | [diff] [blame] | 1456 | 				logical, rcu_str_deref(dev->name)); | 
| Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 1457 | 		} | 
 | 1458 | 	} else { | 
 | 1459 | did_not_correct_error: | 
| Stefan Behrens | d9d181c | 2012-11-02 09:58:09 +0100 | [diff] [blame] | 1460 | 		spin_lock(&sctx->stat_lock); | 
 | 1461 | 		sctx->stat.uncorrectable_errors++; | 
 | 1462 | 		spin_unlock(&sctx->stat_lock); | 
| David Sterba | b14af3b | 2015-10-08 10:43:10 +0200 | [diff] [blame] | 1463 | 		btrfs_err_rl_in_rcu(fs_info, | 
 | 1464 | 			"unable to fixup (regular) error at logical %llu on dev %s", | 
| Geert Uytterhoeven | c1c9ff7 | 2013-08-20 13:20:07 +0200 | [diff] [blame] | 1465 | 			logical, rcu_str_deref(dev->name)); | 
| Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 1466 | 	} | 
 | 1467 |  | 
 | 1468 | out: | 
| Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 1469 | 	if (sblocks_for_recheck) { | 
 | 1470 | 		for (mirror_index = 0; mirror_index < BTRFS_MAX_MIRRORS; | 
 | 1471 | 		     mirror_index++) { | 
 | 1472 | 			struct scrub_block *sblock = sblocks_for_recheck + | 
 | 1473 | 						     mirror_index; | 
| Miao Xie | af8e2d1 | 2014-10-23 14:42:50 +0800 | [diff] [blame] | 1474 | 			struct scrub_recover *recover; | 
| Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 1475 | 			int page_index; | 
 | 1476 |  | 
| Stefan Behrens | 7a9e998 | 2012-11-02 14:58:04 +0100 | [diff] [blame] | 1477 | 			for (page_index = 0; page_index < sblock->page_count; | 
 | 1478 | 			     page_index++) { | 
 | 1479 | 				sblock->pagev[page_index]->sblock = NULL; | 
| Miao Xie | af8e2d1 | 2014-10-23 14:42:50 +0800 | [diff] [blame] | 1480 | 				recover = sblock->pagev[page_index]->recover; | 
 | 1481 | 				if (recover) { | 
| Qu Wenruo | e501bfe | 2017-03-29 09:33:22 +0800 | [diff] [blame] | 1482 | 					scrub_put_recover(fs_info, recover); | 
| Miao Xie | af8e2d1 | 2014-10-23 14:42:50 +0800 | [diff] [blame] | 1483 | 					sblock->pagev[page_index]->recover = | 
 | 1484 | 									NULL; | 
 | 1485 | 				} | 
| Stefan Behrens | 7a9e998 | 2012-11-02 14:58:04 +0100 | [diff] [blame] | 1486 | 				scrub_page_put(sblock->pagev[page_index]); | 
 | 1487 | 			} | 
| Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 1488 | 		} | 
 | 1489 | 		kfree(sblocks_for_recheck); | 
 | 1490 | 	} | 
 | 1491 |  | 
| Qu Wenruo | 28d70e2 | 2017-04-14 08:35:55 +0800 | [diff] [blame] | 1492 | 	ret = unlock_full_stripe(fs_info, logical, full_stripe_locked); | 
 | 1493 | 	if (ret < 0) | 
 | 1494 | 		return ret; | 
| Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 1495 | 	return 0; | 
| Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 1496 | } | 
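
The long comments inside scrub_handle_errored_block() describe a page-stitching strategy: mirrors may fail on different pages, and a good block can still be assembled as long as every page is readable on at least one mirror. A self-contained toy illustration of that idea, using made-up data rather than the kernel structures:

	#include <stdbool.h>
	#include <stdio.h>

	#define NPAGES   2
	#define NMIRRORS 2

	struct mirror_demo {
		bool io_error[NPAGES];
		char data[NPAGES];
	};

	int main(void)
	{
		/* mirror 0 cannot read page 0, mirror 1 cannot read page 1 */
		struct mirror_demo m[NMIRRORS] = {
			{ { true,  false }, { '?', 'B' } },
			{ { false, true  }, { 'A', '?' } },
		};
		char repaired[NPAGES] = { '?', '?' };
		bool success = true;

		for (int p = 0; p < NPAGES; p++) {
			bool found = false;

			for (int i = 0; i < NMIRRORS; i++) {
				if (!m[i].io_error[p]) {
					repaired[p] = m[i].data[p];	/* take the readable copy */
					found = true;
					break;
				}
			}
			if (!found)
				success = false;	/* no mirror could read this page */
		}
		printf("stitched block: %c%c (%s)\n", repaired[0], repaired[1],
		       success ? "repairable" : "uncorrectable");
		return success ? 0 : 1;
	}

When no readable copy exists for some page, the real code takes the uncorrectable path (or, during device replace, writes zeros for that page), exactly as the comments above describe.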
 | 1497 |  | 
| Zhao Lei | 8e5cfb5 | 2015-01-20 15:11:33 +0800 | [diff] [blame] | 1498 | static inline int scrub_nr_raid_mirrors(struct btrfs_bio *bbio) | 
| Miao Xie | af8e2d1 | 2014-10-23 14:42:50 +0800 | [diff] [blame] | 1499 | { | 
| Zhao Lei | 10f1190 | 2015-01-20 15:11:43 +0800 | [diff] [blame] | 1500 | 	if (bbio->map_type & BTRFS_BLOCK_GROUP_RAID5) | 
 | 1501 | 		return 2; | 
 | 1502 | 	else if (bbio->map_type & BTRFS_BLOCK_GROUP_RAID6) | 
 | 1503 | 		return 3; | 
 | 1504 | 	else | 
| Miao Xie | af8e2d1 | 2014-10-23 14:42:50 +0800 | [diff] [blame] | 1505 | 		return (int)bbio->num_stripes; | 
| Miao Xie | af8e2d1 | 2014-10-23 14:42:50 +0800 | [diff] [blame] | 1506 | } | 
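
For the parity profiles the mirror count is not the number of stripes: roughly speaking, mirror 1 is the plain read of the data stripe, mirror 2 asks the RAID56 code to rebuild the data from the remaining stripes plus parity, and RAID6 adds a third option that also brings the Q parity into play. A tiny standalone restatement of the rule; the flag values are stand-ins chosen for the demo, not the real BTRFS_BLOCK_GROUP_* bits:

	#include <stdio.h>

	#define DEMO_RAID5 0x1u		/* stand-in flag bits for the demo only */
	#define DEMO_RAID6 0x2u

	static int nr_mirrors_demo(unsigned int map_type, int num_stripes)
	{
		if (map_type & DEMO_RAID5)
			return 2;	/* direct read + one reconstruction path */
		if (map_type & DEMO_RAID6)
			return 3;	/* direct read + two reconstruction paths */
		return num_stripes;	/* RAID1/DUP/RAID10: every stripe is a full copy */
	}

	int main(void)
	{
		printf("raid5=%d raid6=%d raid1=%d\n",
		       nr_mirrors_demo(DEMO_RAID5, 3),
		       nr_mirrors_demo(DEMO_RAID6, 4),
		       nr_mirrors_demo(0, 2));
		return 0;
	}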
 | 1507 |  | 
| Zhao Lei | 10f1190 | 2015-01-20 15:11:43 +0800 | [diff] [blame] | 1508 | static inline void scrub_stripe_index_and_offset(u64 logical, u64 map_type, | 
 | 1509 | 						 u64 *raid_map, | 
| Miao Xie | af8e2d1 | 2014-10-23 14:42:50 +0800 | [diff] [blame] | 1510 | 						 u64 mapped_length, | 
 | 1511 | 						 int nstripes, int mirror, | 
 | 1512 | 						 int *stripe_index, | 
 | 1513 | 						 u64 *stripe_offset) | 
 | 1514 | { | 
 | 1515 | 	int i; | 
 | 1516 |  | 
| Zhao Lei | ffe2d20 | 2015-01-20 15:11:44 +0800 | [diff] [blame] | 1517 | 	if (map_type & BTRFS_BLOCK_GROUP_RAID56_MASK) { | 
| Miao Xie | af8e2d1 | 2014-10-23 14:42:50 +0800 | [diff] [blame] | 1518 | 		/* RAID5/6 */ | 
 | 1519 | 		for (i = 0; i < nstripes; i++) { | 
 | 1520 | 			if (raid_map[i] == RAID6_Q_STRIPE || | 
 | 1521 | 			    raid_map[i] == RAID5_P_STRIPE) | 
 | 1522 | 				continue; | 
 | 1523 |  | 
 | 1524 | 			if (logical >= raid_map[i] && | 
 | 1525 | 			    logical < raid_map[i] + mapped_length) | 
 | 1526 | 				break; | 
 | 1527 | 		} | 
 | 1528 |  | 
 | 1529 | 		*stripe_index = i; | 
 | 1530 | 		*stripe_offset = logical - raid_map[i]; | 
 | 1531 | 	} else { | 
 | 1532 | 		/* The other RAID type */ | 
 | 1533 | 		*stripe_index = mirror; | 
 | 1534 | 		*stripe_offset = 0; | 
 | 1535 | 	} | 
 | 1536 | } | 
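
A standalone worked example of the raid_map lookup above: each entry holds the starting logical address of one data stripe, parity slots carry sentinel values, so the covering stripe and the offset into it follow from a simple range check. All numbers below are made up, and the sentinel is only a stand-in for the RAID5_P_STRIPE/RAID6_Q_STRIPE markers:

	#include <stdio.h>

	int main(void)
	{
		/* two 64 KiB data stripes plus one parity slot */
		unsigned long long raid_map[] = { 1048576ULL, 1114112ULL, ~0ULL };
		unsigned long long mapped_length = 65536ULL;
		unsigned long long logical = 1130496ULL;	/* 16 KiB into stripe 1 */
		int i;

		for (i = 0; i < 3; i++) {
			if (raid_map[i] == ~0ULL)	/* skip the parity stripe */
				continue;
			if (logical >= raid_map[i] &&
			    logical < raid_map[i] + mapped_length)
				break;
		}
		printf("stripe_index=%d stripe_offset=%llu\n",
		       i, logical - raid_map[i]);	/* expected: 1 and 16384 */
		return 0;
	}

For the non-parity profiles the function short-circuits: the mirror number itself is the stripe index and the offset is zero.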
 | 1537 |  | 
| Zhao Lei | be50a8d | 2015-01-20 15:11:42 +0800 | [diff] [blame] | 1538 | static int scrub_setup_recheck_block(struct scrub_block *original_sblock, | 
| Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 1539 | 				     struct scrub_block *sblocks_for_recheck) | 
| Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 1540 | { | 
| Zhao Lei | be50a8d | 2015-01-20 15:11:42 +0800 | [diff] [blame] | 1541 | 	struct scrub_ctx *sctx = original_sblock->sctx; | 
| Jeff Mahoney | fb45625 | 2016-06-22 18:54:56 -0400 | [diff] [blame] | 1542 | 	struct btrfs_fs_info *fs_info = sctx->fs_info; | 
| Zhao Lei | be50a8d | 2015-01-20 15:11:42 +0800 | [diff] [blame] | 1543 | 	u64 length = original_sblock->page_count * PAGE_SIZE; | 
 | 1544 | 	u64 logical = original_sblock->pagev[0]->logical; | 
| Zhao Lei | 4734b7e | 2015-08-19 22:39:18 +0800 | [diff] [blame] | 1545 | 	u64 generation = original_sblock->pagev[0]->generation; | 
 | 1546 | 	u64 flags = original_sblock->pagev[0]->flags; | 
 | 1547 | 	u64 have_csum = original_sblock->pagev[0]->have_csum; | 
| Miao Xie | af8e2d1 | 2014-10-23 14:42:50 +0800 | [diff] [blame] | 1548 | 	struct scrub_recover *recover; | 
 | 1549 | 	struct btrfs_bio *bbio; | 
| Miao Xie | af8e2d1 | 2014-10-23 14:42:50 +0800 | [diff] [blame] | 1550 | 	u64 sublen; | 
 | 1551 | 	u64 mapped_length; | 
 | 1552 | 	u64 stripe_offset; | 
 | 1553 | 	int stripe_index; | 
| Zhao Lei | be50a8d | 2015-01-20 15:11:42 +0800 | [diff] [blame] | 1554 | 	int page_index = 0; | 
| Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 1555 | 	int mirror_index; | 
| Miao Xie | af8e2d1 | 2014-10-23 14:42:50 +0800 | [diff] [blame] | 1556 | 	int nmirrors; | 
| Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 1557 | 	int ret; | 
 | 1558 |  | 
 | 1559 | 	/* | 
| Zhao Lei | 5701934 | 2015-01-20 15:11:45 +0800 | [diff] [blame] | 1560 | 	 * note: the two members refs and outstanding_pages | 
| Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 1561 | 	 * are not used (and not set) in the blocks that are used for | 
 | 1562 | 	 * the recheck procedure | 
 | 1563 | 	 */ | 
 | 1564 |  | 
| Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 1565 | 	while (length > 0) { | 
| Miao Xie | af8e2d1 | 2014-10-23 14:42:50 +0800 | [diff] [blame] | 1566 | 		sublen = min_t(u64, length, PAGE_SIZE); | 
 | 1567 | 		mapped_length = sublen; | 
 | 1568 | 		bbio = NULL; | 
| Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 1569 |  | 
 | 1570 | 		/* | 
 | 1571 | 		 * with a length of PAGE_SIZE, each returned stripe | 
 | 1572 | 		 * represents one mirror | 
 | 1573 | 		 */ | 
| Qu Wenruo | e501bfe | 2017-03-29 09:33:22 +0800 | [diff] [blame] | 1574 | 		btrfs_bio_counter_inc_blocked(fs_info); | 
| Christoph Hellwig | cf8cddd | 2016-10-27 09:27:36 +0200 | [diff] [blame] | 1575 | 		ret = btrfs_map_sblock(fs_info, BTRFS_MAP_GET_READ_MIRRORS, | 
| David Sterba | 825ad4c | 2017-03-28 14:45:22 +0200 | [diff] [blame] | 1576 | 				logical, &mapped_length, &bbio); | 
| Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 1577 | 		if (ret || !bbio || mapped_length < sublen) { | 
| Zhao Lei | 6e9606d | 2015-01-20 15:11:34 +0800 | [diff] [blame] | 1578 | 			btrfs_put_bbio(bbio); | 
| Qu Wenruo | e501bfe | 2017-03-29 09:33:22 +0800 | [diff] [blame] | 1579 | 			btrfs_bio_counter_dec(fs_info); | 
| Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 1580 | 			return -EIO; | 
 | 1581 | 		} | 
 | 1582 |  | 
| Miao Xie | af8e2d1 | 2014-10-23 14:42:50 +0800 | [diff] [blame] | 1583 | 		recover = kzalloc(sizeof(struct scrub_recover), GFP_NOFS); | 
 | 1584 | 		if (!recover) { | 
| Zhao Lei | 6e9606d | 2015-01-20 15:11:34 +0800 | [diff] [blame] | 1585 | 			btrfs_put_bbio(bbio); | 
| Qu Wenruo | e501bfe | 2017-03-29 09:33:22 +0800 | [diff] [blame] | 1586 | 			btrfs_bio_counter_dec(fs_info); | 
| Miao Xie | af8e2d1 | 2014-10-23 14:42:50 +0800 | [diff] [blame] | 1587 | 			return -ENOMEM; | 
 | 1588 | 		} | 
 | 1589 |  | 
| Elena Reshetova | 6f61501 | 2017-03-03 10:55:21 +0200 | [diff] [blame] | 1590 | 		refcount_set(&recover->refs, 1); | 
| Miao Xie | af8e2d1 | 2014-10-23 14:42:50 +0800 | [diff] [blame] | 1591 | 		recover->bbio = bbio; | 
| Miao Xie | af8e2d1 | 2014-10-23 14:42:50 +0800 | [diff] [blame] | 1592 | 		recover->map_length = mapped_length; | 
 | 1593 |  | 
| Ashish Samant | 2473114 | 2016-04-29 18:33:59 -0700 | [diff] [blame] | 1594 | 		BUG_ON(page_index >= SCRUB_MAX_PAGES_PER_BLOCK); | 
| Miao Xie | af8e2d1 | 2014-10-23 14:42:50 +0800 | [diff] [blame] | 1595 |  | 
| Zhao Lei | be50a8d | 2015-01-20 15:11:42 +0800 | [diff] [blame] | 1596 | 		nmirrors = min(scrub_nr_raid_mirrors(bbio), BTRFS_MAX_MIRRORS); | 
| Zhao Lei | 10f1190 | 2015-01-20 15:11:43 +0800 | [diff] [blame] | 1597 |  | 
| Miao Xie | af8e2d1 | 2014-10-23 14:42:50 +0800 | [diff] [blame] | 1598 | 		for (mirror_index = 0; mirror_index < nmirrors; | 
| Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 1599 | 		     mirror_index++) { | 
 | 1600 | 			struct scrub_block *sblock; | 
 | 1601 | 			struct scrub_page *page; | 
 | 1602 |  | 
| Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 1603 | 			sblock = sblocks_for_recheck + mirror_index; | 
| Stefan Behrens | 7a9e998 | 2012-11-02 14:58:04 +0100 | [diff] [blame] | 1604 | 			sblock->sctx = sctx; | 
| Zhao Lei | 4734b7e | 2015-08-19 22:39:18 +0800 | [diff] [blame] | 1605 |  | 
| Stefan Behrens | 7a9e998 | 2012-11-02 14:58:04 +0100 | [diff] [blame] | 1606 | 			page = kzalloc(sizeof(*page), GFP_NOFS); | 
 | 1607 | 			if (!page) { | 
 | 1608 | leave_nomem: | 
| Stefan Behrens | d9d181c | 2012-11-02 09:58:09 +0100 | [diff] [blame] | 1609 | 				spin_lock(&sctx->stat_lock); | 
 | 1610 | 				sctx->stat.malloc_errors++; | 
 | 1611 | 				spin_unlock(&sctx->stat_lock); | 
| Qu Wenruo | e501bfe | 2017-03-29 09:33:22 +0800 | [diff] [blame] | 1612 | 				scrub_put_recover(fs_info, recover); | 
| Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 1613 | 				return -ENOMEM; | 
 | 1614 | 			} | 
| Stefan Behrens | 7a9e998 | 2012-11-02 14:58:04 +0100 | [diff] [blame] | 1615 | 			scrub_page_get(page); | 
 | 1616 | 			sblock->pagev[page_index] = page; | 
| Zhao Lei | 4734b7e | 2015-08-19 22:39:18 +0800 | [diff] [blame] | 1617 | 			page->sblock = sblock; | 
 | 1618 | 			page->flags = flags; | 
 | 1619 | 			page->generation = generation; | 
| Stefan Behrens | 7a9e998 | 2012-11-02 14:58:04 +0100 | [diff] [blame] | 1620 | 			page->logical = logical; | 
| Zhao Lei | 4734b7e | 2015-08-19 22:39:18 +0800 | [diff] [blame] | 1621 | 			page->have_csum = have_csum; | 
 | 1622 | 			if (have_csum) | 
 | 1623 | 				memcpy(page->csum, | 
 | 1624 | 				       original_sblock->pagev[0]->csum, | 
 | 1625 | 				       sctx->csum_size); | 
| Miao Xie | af8e2d1 | 2014-10-23 14:42:50 +0800 | [diff] [blame] | 1626 |  | 
| Zhao Lei | 10f1190 | 2015-01-20 15:11:43 +0800 | [diff] [blame] | 1627 | 			scrub_stripe_index_and_offset(logical, | 
 | 1628 | 						      bbio->map_type, | 
 | 1629 | 						      bbio->raid_map, | 
| Miao Xie | af8e2d1 | 2014-10-23 14:42:50 +0800 | [diff] [blame] | 1630 | 						      mapped_length, | 
| Zhao Lei | e34c330 | 2015-01-20 15:11:31 +0800 | [diff] [blame] | 1631 | 						      bbio->num_stripes - | 
 | 1632 | 						      bbio->num_tgtdevs, | 
| Miao Xie | af8e2d1 | 2014-10-23 14:42:50 +0800 | [diff] [blame] | 1633 | 						      mirror_index, | 
 | 1634 | 						      &stripe_index, | 
 | 1635 | 						      &stripe_offset); | 
 | 1636 | 			page->physical = bbio->stripes[stripe_index].physical + | 
 | 1637 | 					 stripe_offset; | 
 | 1638 | 			page->dev = bbio->stripes[stripe_index].dev; | 
 | 1639 |  | 
| Stefan Behrens | ff023aa | 2012-11-06 11:43:11 +0100 | [diff] [blame] | 1640 | 			BUG_ON(page_index >= original_sblock->page_count); | 
 | 1641 | 			page->physical_for_dev_replace = | 
 | 1642 | 				original_sblock->pagev[page_index]-> | 
 | 1643 | 				physical_for_dev_replace; | 
| Stefan Behrens | 7a9e998 | 2012-11-02 14:58:04 +0100 | [diff] [blame] | 1644 | 			/* for missing devices, dev->bdev is NULL */ | 
| Stefan Behrens | 7a9e998 | 2012-11-02 14:58:04 +0100 | [diff] [blame] | 1645 | 			page->mirror_num = mirror_index + 1; | 
| Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 1646 | 			sblock->page_count++; | 
| Stefan Behrens | 7a9e998 | 2012-11-02 14:58:04 +0100 | [diff] [blame] | 1647 | 			page->page = alloc_page(GFP_NOFS); | 
 | 1648 | 			if (!page->page) | 
 | 1649 | 				goto leave_nomem; | 
| Miao Xie | af8e2d1 | 2014-10-23 14:42:50 +0800 | [diff] [blame] | 1650 |  | 
 | 1651 | 			scrub_get_recover(recover); | 
 | 1652 | 			page->recover = recover; | 
| Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 1653 | 		} | 
| Qu Wenruo | e501bfe | 2017-03-29 09:33:22 +0800 | [diff] [blame] | 1654 | 		scrub_put_recover(fs_info, recover); | 
| Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 1655 | 		length -= sublen; | 
 | 1656 | 		logical += sublen; | 
 | 1657 | 		page_index++; | 
 | 1658 | 	} | 
 | 1659 |  | 
 | 1660 | 	return 0; | 
 | 1661 | } | 
 | 1662 |  | 
| Miao Xie | af8e2d1 | 2014-10-23 14:42:50 +0800 | [diff] [blame] | 1663 | struct scrub_bio_ret { | 
 | 1664 | 	struct completion event; | 
 | 1665 | 	int error; | 
 | 1666 | }; | 
 | 1667 |  | 
| Christoph Hellwig | 4246a0b | 2015-07-20 15:29:37 +0200 | [diff] [blame] | 1668 | static void scrub_bio_wait_endio(struct bio *bio) | 
| Miao Xie | af8e2d1 | 2014-10-23 14:42:50 +0800 | [diff] [blame] | 1669 | { | 
 | 1670 | 	struct scrub_bio_ret *ret = bio->bi_private; | 
 | 1671 |  | 
| Christoph Hellwig | 4246a0b | 2015-07-20 15:29:37 +0200 | [diff] [blame] | 1672 | 	ret->error = bio->bi_error; | 
| Miao Xie | af8e2d1 | 2014-10-23 14:42:50 +0800 | [diff] [blame] | 1673 | 	complete(&ret->event); | 
 | 1674 | } | 
 | 1675 |  | 
 | 1676 | static inline int scrub_is_page_on_raid56(struct scrub_page *page) | 
 | 1677 | { | 
| Zhao Lei | 10f1190 | 2015-01-20 15:11:43 +0800 | [diff] [blame] | 1678 | 	return page->recover && | 
| Zhao Lei | ffe2d20 | 2015-01-20 15:11:44 +0800 | [diff] [blame] | 1679 | 	       (page->recover->bbio->map_type & BTRFS_BLOCK_GROUP_RAID56_MASK); | 
| Miao Xie | af8e2d1 | 2014-10-23 14:42:50 +0800 | [diff] [blame] | 1680 | } | 
 | 1681 |  | 
 | 1682 | static int scrub_submit_raid56_bio_wait(struct btrfs_fs_info *fs_info, | 
 | 1683 | 					struct bio *bio, | 
 | 1684 | 					struct scrub_page *page) | 
 | 1685 | { | 
 | 1686 | 	struct scrub_bio_ret done; | 
 | 1687 | 	int ret; | 
 | 1688 |  | 
 | 1689 | 	init_completion(&done.event); | 
 | 1690 | 	done.error = 0; | 
 | 1691 | 	bio->bi_iter.bi_sector = page->logical >> 9; | 
 | 1692 | 	bio->bi_private = &done; | 
 | 1693 | 	bio->bi_end_io = scrub_bio_wait_endio; | 
 | 1694 |  | 
| Jeff Mahoney | 2ff7e61 | 2016-06-22 18:54:24 -0400 | [diff] [blame] | 1695 | 	ret = raid56_parity_recover(fs_info, bio, page->recover->bbio, | 
| Miao Xie | af8e2d1 | 2014-10-23 14:42:50 +0800 | [diff] [blame] | 1696 | 				    page->recover->map_length, | 
| Miao Xie | 4245215 | 2014-11-25 16:39:28 +0800 | [diff] [blame] | 1697 | 				    page->mirror_num, 0); | 
| Miao Xie | af8e2d1 | 2014-10-23 14:42:50 +0800 | [diff] [blame] | 1698 | 	if (ret) | 
 | 1699 | 		return ret; | 
 | 1700 |  | 
 | 1701 | 	wait_for_completion(&done.event); | 
 | 1702 | 	if (done.error) | 
 | 1703 | 		return -EIO; | 
 | 1704 |  | 
 | 1705 | 	return 0; | 
 | 1706 | } | 
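
scrub_submit_raid56_bio_wait() turns the asynchronous parity-recover submission into a synchronous call: the end_io handler fills a small on-stack result and signals a completion that the submitter blocks on. A userspace analogue of that submit-and-wait shape, using a pthread condition variable in place of the kernel completion (illustrative only):

	#include <pthread.h>
	#include <stdio.h>

	struct demo_done {
		pthread_mutex_t lock;
		pthread_cond_t cond;
		int finished;
		int error;
	};

	static struct demo_done done = {
		PTHREAD_MUTEX_INITIALIZER, PTHREAD_COND_INITIALIZER, 0, 0
	};

	static void *io_worker(void *arg)	/* plays the role of the end_io callback */
	{
		(void)arg;
		pthread_mutex_lock(&done.lock);
		done.error = 0;			/* report success, like bi_error == 0 */
		done.finished = 1;
		pthread_cond_signal(&done.cond);
		pthread_mutex_unlock(&done.lock);
		return NULL;
	}

	int main(void)
	{
		pthread_t t;

		pthread_create(&t, NULL, io_worker, NULL);	/* "submit" the I/O */

		pthread_mutex_lock(&done.lock);
		while (!done.finished)		/* wait_for_completion() equivalent */
			pthread_cond_wait(&done.cond, &done.lock);
		pthread_mutex_unlock(&done.lock);

		pthread_join(t, NULL);
		printf("recover %s\n", done.error ? "failed" : "completed");
		return done.error ? 1 : 0;
	}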
 | 1707 |  | 
| Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 1708 | /* | 
 | 1709 |  * this function will check the on-disk data for checksum errors, header | 
 | 1710 |  * errors and read I/O errors. If any I/O errors happen, the exact pages | 
 | 1711 |  * which are errored are marked as being bad. The goal is to enable scrub | 
 | 1712 |  * to take those pages that are not errored from all the mirrors so that | 
 | 1713 |  * the pages that are errored in the just handled mirror can be repaired. | 
 | 1714 |  */ | 
| Stefan Behrens | 34f5c8e | 2012-11-02 16:16:26 +0100 | [diff] [blame] | 1715 | static void scrub_recheck_block(struct btrfs_fs_info *fs_info, | 
| Zhao Lei | affe4a5 | 2015-08-24 21:32:06 +0800 | [diff] [blame] | 1716 | 				struct scrub_block *sblock, | 
 | 1717 | 				int retry_failed_mirror) | 
| Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 1718 | { | 
 | 1719 | 	int page_num; | 
 | 1720 |  | 
 | 1721 | 	sblock->no_io_error_seen = 1; | 
| Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 1722 |  | 
 | 1723 | 	for (page_num = 0; page_num < sblock->page_count; page_num++) { | 
 | 1724 | 		struct bio *bio; | 
| Stefan Behrens | 7a9e998 | 2012-11-02 14:58:04 +0100 | [diff] [blame] | 1725 | 		struct scrub_page *page = sblock->pagev[page_num]; | 
| Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 1726 |  | 
| Stefan Behrens | 442a4f6 | 2012-05-25 16:06:08 +0200 | [diff] [blame] | 1727 | 		if (page->dev->bdev == NULL) { | 
| Stefan Behrens | ea9947b | 2012-05-04 15:16:07 -0400 | [diff] [blame] | 1728 | 			page->io_error = 1; | 
 | 1729 | 			sblock->no_io_error_seen = 0; | 
 | 1730 | 			continue; | 
 | 1731 | 		} | 
 | 1732 |  | 
| Stefan Behrens | 7a9e998 | 2012-11-02 14:58:04 +0100 | [diff] [blame] | 1733 | 		WARN_ON(!page->page); | 
| Chris Mason | 9be3395 | 2013-05-17 18:30:14 -0400 | [diff] [blame] | 1734 | 		bio = btrfs_io_bio_alloc(GFP_NOFS, 1); | 
| Stefan Behrens | 34f5c8e | 2012-11-02 16:16:26 +0100 | [diff] [blame] | 1735 | 		if (!bio) { | 
 | 1736 | 			page->io_error = 1; | 
 | 1737 | 			sblock->no_io_error_seen = 0; | 
 | 1738 | 			continue; | 
 | 1739 | 		} | 
| Stefan Behrens | 442a4f6 | 2012-05-25 16:06:08 +0200 | [diff] [blame] | 1740 | 		bio->bi_bdev = page->dev->bdev; | 
| Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 1741 |  | 
| Stefan Behrens | 34f5c8e | 2012-11-02 16:16:26 +0100 | [diff] [blame] | 1742 | 		bio_add_page(bio, page->page, PAGE_SIZE, 0); | 
| Miao Xie | af8e2d1 | 2014-10-23 14:42:50 +0800 | [diff] [blame] | 1743 | 		if (!retry_failed_mirror && scrub_is_page_on_raid56(page)) { | 
| Liu Bo | 1bcd7aa | 2017-03-29 10:55:16 -0700 | [diff] [blame] | 1744 | 			if (scrub_submit_raid56_bio_wait(fs_info, bio, page)) { | 
 | 1745 | 				page->io_error = 1; | 
| Miao Xie | af8e2d1 | 2014-10-23 14:42:50 +0800 | [diff] [blame] | 1746 | 				sblock->no_io_error_seen = 0; | 
| Liu Bo | 1bcd7aa | 2017-03-29 10:55:16 -0700 | [diff] [blame] | 1747 | 			} | 
| Miao Xie | af8e2d1 | 2014-10-23 14:42:50 +0800 | [diff] [blame] | 1748 | 		} else { | 
 | 1749 | 			bio->bi_iter.bi_sector = page->physical >> 9; | 
| Mike Christie | 37226b2 | 2016-06-05 14:31:52 -0500 | [diff] [blame] | 1750 | 			bio_set_op_attrs(bio, REQ_OP_READ, 0); | 
| Miao Xie | af8e2d1 | 2014-10-23 14:42:50 +0800 | [diff] [blame] | 1751 |  | 
| Liu Bo | 1bcd7aa | 2017-03-29 10:55:16 -0700 | [diff] [blame] | 1752 | 			if (btrfsic_submit_bio_wait(bio)) { | 
 | 1753 | 				page->io_error = 1; | 
| Miao Xie | af8e2d1 | 2014-10-23 14:42:50 +0800 | [diff] [blame] | 1754 | 				sblock->no_io_error_seen = 0; | 
| Liu Bo | 1bcd7aa | 2017-03-29 10:55:16 -0700 | [diff] [blame] | 1755 | 			} | 
| Miao Xie | af8e2d1 | 2014-10-23 14:42:50 +0800 | [diff] [blame] | 1756 | 		} | 
| Kent Overstreet | 33879d4 | 2013-11-23 22:33:32 -0800 | [diff] [blame] | 1757 |  | 
| Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 1758 | 		bio_put(bio); | 
 | 1759 | 	} | 
 | 1760 |  | 
 | 1761 | 	if (sblock->no_io_error_seen) | 
| Zhao Lei | ba7cf98 | 2015-08-24 21:18:02 +0800 | [diff] [blame] | 1762 | 		scrub_recheck_block_checksum(sblock); | 
| Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 1763 | } | 
 | 1764 |  | 
| Miao Xie | 17a9be2 | 2014-07-24 11:37:08 +0800 | [diff] [blame] | 1765 | static inline int scrub_check_fsid(u8 fsid[], | 
 | 1766 | 				   struct scrub_page *spage) | 
 | 1767 | { | 
 | 1768 | 	struct btrfs_fs_devices *fs_devices = spage->dev->fs_devices; | 
 | 1769 | 	int ret; | 
 | 1770 |  | 
 | 1771 | 	ret = memcmp(fsid, fs_devices->fsid, BTRFS_UUID_SIZE); | 
 | 1772 | 	return !ret; | 
 | 1773 | } | 
 | 1774 |  | 
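/*
 * Recompute the checksum state of a block that was re-read without I/O
 * errors. The header/checksum/generation error flags are cleared and then
 * re-evaluated by the data or tree-block checksum helper, depending on the
 * flags of the first page.
 */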
| Zhao Lei | ba7cf98 | 2015-08-24 21:18:02 +0800 | [diff] [blame] | 1775 | static void scrub_recheck_block_checksum(struct scrub_block *sblock) | 
| Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 1776 | { | 
| Zhao Lei | ba7cf98 | 2015-08-24 21:18:02 +0800 | [diff] [blame] | 1777 | 	sblock->header_error = 0; | 
 | 1778 | 	sblock->checksum_error = 0; | 
 | 1779 | 	sblock->generation_error = 0; | 
| Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 1780 |  | 
| Zhao Lei | ba7cf98 | 2015-08-24 21:18:02 +0800 | [diff] [blame] | 1781 | 	if (sblock->pagev[0]->flags & BTRFS_EXTENT_FLAG_DATA) | 
 | 1782 | 		scrub_checksum_data(sblock); | 
 | 1783 | 	else | 
 | 1784 | 		scrub_checksum_tree_block(sblock); | 
| Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 1785 | } | 
 | 1786 |  | 
| Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 1787 | static int scrub_repair_block_from_good_copy(struct scrub_block *sblock_bad, | 
| Zhao Lei | 114ab50 | 2015-01-20 15:11:36 +0800 | [diff] [blame] | 1788 | 					     struct scrub_block *sblock_good) | 
| Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 1789 | { | 
 | 1790 | 	int page_num; | 
 | 1791 | 	int ret = 0; | 
 | 1792 |  | 
 | 1793 | 	for (page_num = 0; page_num < sblock_bad->page_count; page_num++) { | 
 | 1794 | 		int ret_sub; | 
 | 1795 |  | 
 | 1796 | 		ret_sub = scrub_repair_page_from_good_copy(sblock_bad, | 
 | 1797 | 							   sblock_good, | 
| Zhao Lei | 114ab50 | 2015-01-20 15:11:36 +0800 | [diff] [blame] | 1798 | 							   page_num, 1); | 
| Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 1799 | 		if (ret_sub) | 
 | 1800 | 			ret = ret_sub; | 
 | 1801 | 	} | 
 | 1802 |  | 
 | 1803 | 	return ret; | 
 | 1804 | } | 
 | 1805 |  | 
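/*
 * Copy one page from the good mirror over the corresponding page of the bad
 * block. With force_write set the page is rewritten unconditionally;
 * otherwise it is only written back when the bad block has a header or
 * checksum error or the page itself saw an I/O error.
 */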
 | 1806 | static int scrub_repair_page_from_good_copy(struct scrub_block *sblock_bad, | 
 | 1807 | 					    struct scrub_block *sblock_good, | 
 | 1808 | 					    int page_num, int force_write) | 
 | 1809 | { | 
| Stefan Behrens | 7a9e998 | 2012-11-02 14:58:04 +0100 | [diff] [blame] | 1810 | 	struct scrub_page *page_bad = sblock_bad->pagev[page_num]; | 
 | 1811 | 	struct scrub_page *page_good = sblock_good->pagev[page_num]; | 
| Jeff Mahoney | 0b246af | 2016-06-22 18:54:23 -0400 | [diff] [blame] | 1812 | 	struct btrfs_fs_info *fs_info = sblock_bad->sctx->fs_info; | 
| Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 1813 |  | 
| Stefan Behrens | 7a9e998 | 2012-11-02 14:58:04 +0100 | [diff] [blame] | 1814 | 	BUG_ON(page_bad->page == NULL); | 
 | 1815 | 	BUG_ON(page_good->page == NULL); | 
| Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 1816 | 	if (force_write || sblock_bad->header_error || | 
 | 1817 | 	    sblock_bad->checksum_error || page_bad->io_error) { | 
 | 1818 | 		struct bio *bio; | 
 | 1819 | 		int ret; | 
| Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 1820 |  | 
| Stefan Behrens | ff023aa | 2012-11-06 11:43:11 +0100 | [diff] [blame] | 1821 | 		if (!page_bad->dev->bdev) { | 
| Jeff Mahoney | 0b246af | 2016-06-22 18:54:23 -0400 | [diff] [blame] | 1822 | 			btrfs_warn_rl(fs_info, | 
| Jeff Mahoney | 5d163e0 | 2016-09-20 10:05:00 -0400 | [diff] [blame] | 1823 | 				"scrub_repair_page_from_good_copy(bdev == NULL) is unexpected"); | 
| Stefan Behrens | ff023aa | 2012-11-06 11:43:11 +0100 | [diff] [blame] | 1824 | 			return -EIO; | 
 | 1825 | 		} | 
 | 1826 |  | 
| Chris Mason | 9be3395 | 2013-05-17 18:30:14 -0400 | [diff] [blame] | 1827 | 		bio = btrfs_io_bio_alloc(GFP_NOFS, 1); | 
| Tsutomu Itoh | e627ee7 | 2012-04-12 16:03:56 -0400 | [diff] [blame] | 1828 | 		if (!bio) | 
 | 1829 | 			return -EIO; | 
| Stefan Behrens | 442a4f6 | 2012-05-25 16:06:08 +0200 | [diff] [blame] | 1830 | 		bio->bi_bdev = page_bad->dev->bdev; | 
| Kent Overstreet | 4f024f3 | 2013-10-11 15:44:27 -0700 | [diff] [blame] | 1831 | 		bio->bi_iter.bi_sector = page_bad->physical >> 9; | 
| Mike Christie | 37226b2 | 2016-06-05 14:31:52 -0500 | [diff] [blame] | 1832 | 		bio_set_op_attrs(bio, REQ_OP_WRITE, 0); | 
| Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 1833 |  | 
 | 1834 | 		ret = bio_add_page(bio, page_good->page, PAGE_SIZE, 0); | 
 | 1835 | 		if (PAGE_SIZE != ret) { | 
 | 1836 | 			bio_put(bio); | 
 | 1837 | 			return -EIO; | 
 | 1838 | 		} | 
| Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 1839 |  | 
| Mike Christie | 4e49ea4 | 2016-06-05 14:31:41 -0500 | [diff] [blame] | 1840 | 		if (btrfsic_submit_bio_wait(bio)) { | 
| Stefan Behrens | 442a4f6 | 2012-05-25 16:06:08 +0200 | [diff] [blame] | 1841 | 			btrfs_dev_stat_inc_and_print(page_bad->dev, | 
 | 1842 | 				BTRFS_DEV_STAT_WRITE_ERRS); | 
| Stefan Behrens | ff023aa | 2012-11-06 11:43:11 +0100 | [diff] [blame] | 1843 | 			btrfs_dev_replace_stats_inc( | 
| Jeff Mahoney | 0b246af | 2016-06-22 18:54:23 -0400 | [diff] [blame] | 1844 | 				&fs_info->dev_replace.num_write_errors); | 
| Stefan Behrens | 442a4f6 | 2012-05-25 16:06:08 +0200 | [diff] [blame] | 1845 | 			bio_put(bio); | 
 | 1846 | 			return -EIO; | 
 | 1847 | 		} | 
| Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 1848 | 		bio_put(bio); | 
 | 1849 | 	} | 
 | 1850 |  | 
 | 1851 | 	return 0; | 
 | 1852 | } | 
 | 1853 |  | 
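/*
 * In dev-replace mode, mirror every page of this block to the target device.
 * Write errors are accounted in the dev_replace statistics.
 */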
| Stefan Behrens | ff023aa | 2012-11-06 11:43:11 +0100 | [diff] [blame] | 1854 | static void scrub_write_block_to_dev_replace(struct scrub_block *sblock) | 
 | 1855 | { | 
| Jeff Mahoney | 0b246af | 2016-06-22 18:54:23 -0400 | [diff] [blame] | 1856 | 	struct btrfs_fs_info *fs_info = sblock->sctx->fs_info; | 
| Stefan Behrens | ff023aa | 2012-11-06 11:43:11 +0100 | [diff] [blame] | 1857 | 	int page_num; | 
 | 1858 |  | 
| Miao Xie | 5a6ac9e | 2014-11-06 17:20:58 +0800 | [diff] [blame] | 1859 | 	/* | 
 | 1860 | 	 * This block is only used to check the parity on the source device, | 
 | 1861 | 	 * so its data need not be written to the destination device. | 
 | 1862 | 	 */ | 
 | 1863 | 	if (sblock->sparity) | 
 | 1864 | 		return; | 
 | 1865 |  | 
| Stefan Behrens | ff023aa | 2012-11-06 11:43:11 +0100 | [diff] [blame] | 1866 | 	for (page_num = 0; page_num < sblock->page_count; page_num++) { | 
 | 1867 | 		int ret; | 
 | 1868 |  | 
 | 1869 | 		ret = scrub_write_page_to_dev_replace(sblock, page_num); | 
 | 1870 | 		if (ret) | 
 | 1871 | 			btrfs_dev_replace_stats_inc( | 
| Jeff Mahoney | 0b246af | 2016-06-22 18:54:23 -0400 | [diff] [blame] | 1872 | 				&fs_info->dev_replace.num_write_errors); | 
| Stefan Behrens | ff023aa | 2012-11-06 11:43:11 +0100 | [diff] [blame] | 1873 | 	} | 
 | 1874 | } | 
 | 1875 |  | 
 | 1876 | static int scrub_write_page_to_dev_replace(struct scrub_block *sblock, | 
 | 1877 | 					   int page_num) | 
 | 1878 | { | 
 | 1879 | 	struct scrub_page *spage = sblock->pagev[page_num]; | 
 | 1880 |  | 
 | 1881 | 	BUG_ON(spage->page == NULL); | 
 | 1882 | 	if (spage->io_error) { | 
 | 1883 | 		void *mapped_buffer = kmap_atomic(spage->page); | 
 | 1884 |  | 
| David Sterba | 619a974 | 2017-03-29 20:48:44 +0200 | [diff] [blame] | 1885 | 		clear_page(mapped_buffer); | 
| Stefan Behrens | ff023aa | 2012-11-06 11:43:11 +0100 | [diff] [blame] | 1886 | 		flush_dcache_page(spage->page); | 
 | 1887 | 		kunmap_atomic(mapped_buffer); | 
 | 1888 | 	} | 
 | 1889 | 	return scrub_add_page_to_wr_bio(sblock->sctx, spage); | 
 | 1890 | } | 
 | 1891 |  | 
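/*
 * Queue a page for writing to the dev-replace target. Pages are packed into
 * the per-context write bio (wr_curr_bio) as long as they are physically and
 * logically contiguous with it; otherwise the current bio is submitted first
 * and a new one is started.
 */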
 | 1892 | static int scrub_add_page_to_wr_bio(struct scrub_ctx *sctx, | 
 | 1893 | 				    struct scrub_page *spage) | 
 | 1894 | { | 
 | 1895 | 	struct scrub_wr_ctx *wr_ctx = &sctx->wr_ctx; | 
 | 1896 | 	struct scrub_bio *sbio; | 
 | 1897 | 	int ret; | 
 | 1898 |  | 
 | 1899 | 	mutex_lock(&wr_ctx->wr_lock); | 
 | 1900 | again: | 
 | 1901 | 	if (!wr_ctx->wr_curr_bio) { | 
 | 1902 | 		wr_ctx->wr_curr_bio = kzalloc(sizeof(*wr_ctx->wr_curr_bio), | 
| David Sterba | 58c4e17 | 2016-02-11 10:49:42 +0100 | [diff] [blame] | 1903 | 					      GFP_KERNEL); | 
| Stefan Behrens | ff023aa | 2012-11-06 11:43:11 +0100 | [diff] [blame] | 1904 | 		if (!wr_ctx->wr_curr_bio) { | 
 | 1905 | 			mutex_unlock(&wr_ctx->wr_lock); | 
 | 1906 | 			return -ENOMEM; | 
 | 1907 | 		} | 
 | 1908 | 		wr_ctx->wr_curr_bio->sctx = sctx; | 
 | 1909 | 		wr_ctx->wr_curr_bio->page_count = 0; | 
 | 1910 | 	} | 
 | 1911 | 	sbio = wr_ctx->wr_curr_bio; | 
 | 1912 | 	if (sbio->page_count == 0) { | 
 | 1913 | 		struct bio *bio; | 
 | 1914 |  | 
 | 1915 | 		sbio->physical = spage->physical_for_dev_replace; | 
 | 1916 | 		sbio->logical = spage->logical; | 
 | 1917 | 		sbio->dev = wr_ctx->tgtdev; | 
 | 1918 | 		bio = sbio->bio; | 
 | 1919 | 		if (!bio) { | 
| David Sterba | 58c4e17 | 2016-02-11 10:49:42 +0100 | [diff] [blame] | 1920 | 			bio = btrfs_io_bio_alloc(GFP_KERNEL, | 
 | 1921 | 					wr_ctx->pages_per_wr_bio); | 
| Stefan Behrens | ff023aa | 2012-11-06 11:43:11 +0100 | [diff] [blame] | 1922 | 			if (!bio) { | 
 | 1923 | 				mutex_unlock(&wr_ctx->wr_lock); | 
 | 1924 | 				return -ENOMEM; | 
 | 1925 | 			} | 
 | 1926 | 			sbio->bio = bio; | 
 | 1927 | 		} | 
 | 1928 |  | 
 | 1929 | 		bio->bi_private = sbio; | 
 | 1930 | 		bio->bi_end_io = scrub_wr_bio_end_io; | 
 | 1931 | 		bio->bi_bdev = sbio->dev->bdev; | 
| Kent Overstreet | 4f024f3 | 2013-10-11 15:44:27 -0700 | [diff] [blame] | 1932 | 		bio->bi_iter.bi_sector = sbio->physical >> 9; | 
| Mike Christie | 37226b2 | 2016-06-05 14:31:52 -0500 | [diff] [blame] | 1933 | 		bio_set_op_attrs(bio, REQ_OP_WRITE, 0); | 
| Stefan Behrens | ff023aa | 2012-11-06 11:43:11 +0100 | [diff] [blame] | 1934 | 		sbio->err = 0; | 
 | 1935 | 	} else if (sbio->physical + sbio->page_count * PAGE_SIZE != | 
 | 1936 | 		   spage->physical_for_dev_replace || | 
 | 1937 | 		   sbio->logical + sbio->page_count * PAGE_SIZE != | 
 | 1938 | 		   spage->logical) { | 
 | 1939 | 		scrub_wr_submit(sctx); | 
 | 1940 | 		goto again; | 
 | 1941 | 	} | 
 | 1942 |  | 
 | 1943 | 	ret = bio_add_page(sbio->bio, spage->page, PAGE_SIZE, 0); | 
 | 1944 | 	if (ret != PAGE_SIZE) { | 
 | 1945 | 		if (sbio->page_count < 1) { | 
 | 1946 | 			bio_put(sbio->bio); | 
 | 1947 | 			sbio->bio = NULL; | 
 | 1948 | 			mutex_unlock(&wr_ctx->wr_lock); | 
 | 1949 | 			return -EIO; | 
 | 1950 | 		} | 
 | 1951 | 		scrub_wr_submit(sctx); | 
 | 1952 | 		goto again; | 
 | 1953 | 	} | 
 | 1954 |  | 
 | 1955 | 	sbio->pagev[sbio->page_count] = spage; | 
 | 1956 | 	scrub_page_get(spage); | 
 | 1957 | 	sbio->page_count++; | 
 | 1958 | 	if (sbio->page_count == wr_ctx->pages_per_wr_bio) | 
 | 1959 | 		scrub_wr_submit(sctx); | 
 | 1960 | 	mutex_unlock(&wr_ctx->wr_lock); | 
 | 1961 |  | 
 | 1962 | 	return 0; | 
 | 1963 | } | 
 | 1964 |  | 
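/*
 * Submit the currently accumulated write bio to the dev-replace target and
 * account it as pending. Completion is handled by scrub_wr_bio_end_io().
 */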
 | 1965 | static void scrub_wr_submit(struct scrub_ctx *sctx) | 
 | 1966 | { | 
 | 1967 | 	struct scrub_wr_ctx *wr_ctx = &sctx->wr_ctx; | 
 | 1968 | 	struct scrub_bio *sbio; | 
 | 1969 |  | 
 | 1970 | 	if (!wr_ctx->wr_curr_bio) | 
 | 1971 | 		return; | 
 | 1972 |  | 
 | 1973 | 	sbio = wr_ctx->wr_curr_bio; | 
 | 1974 | 	wr_ctx->wr_curr_bio = NULL; | 
 | 1975 | 	WARN_ON(!sbio->bio->bi_bdev); | 
 | 1976 | 	scrub_pending_bio_inc(sctx); | 
 | 1977 | 	/* process all writes in a single worker thread. The block layer | 
 | 1978 | 	 * then orders the requests before sending them to the driver, which | 
 | 1979 | 	 * doubled the write performance on spinning disks when measured | 
 | 1980 | 	 * with Linux 3.5 */ | 
| Mike Christie | 4e49ea4 | 2016-06-05 14:31:41 -0500 | [diff] [blame] | 1981 | 	btrfsic_submit_bio(sbio->bio); | 
| Stefan Behrens | ff023aa | 2012-11-06 11:43:11 +0100 | [diff] [blame] | 1982 | } | 
 | 1983 |  | 
| Christoph Hellwig | 4246a0b | 2015-07-20 15:29:37 +0200 | [diff] [blame] | 1984 | static void scrub_wr_bio_end_io(struct bio *bio) | 
| Stefan Behrens | ff023aa | 2012-11-06 11:43:11 +0100 | [diff] [blame] | 1985 | { | 
 | 1986 | 	struct scrub_bio *sbio = bio->bi_private; | 
| Jeff Mahoney | fb45625 | 2016-06-22 18:54:56 -0400 | [diff] [blame] | 1987 | 	struct btrfs_fs_info *fs_info = sbio->dev->fs_info; | 
| Stefan Behrens | ff023aa | 2012-11-06 11:43:11 +0100 | [diff] [blame] | 1988 |  | 
| Christoph Hellwig | 4246a0b | 2015-07-20 15:29:37 +0200 | [diff] [blame] | 1989 | 	sbio->err = bio->bi_error; | 
| Stefan Behrens | ff023aa | 2012-11-06 11:43:11 +0100 | [diff] [blame] | 1990 | 	sbio->bio = bio; | 
 | 1991 |  | 
| Liu Bo | 9e0af23 | 2014-08-15 23:36:53 +0800 | [diff] [blame] | 1992 | 	btrfs_init_work(&sbio->work, btrfs_scrubwrc_helper, | 
 | 1993 | 			 scrub_wr_bio_end_io_worker, NULL, NULL); | 
| Qu Wenruo | 0339ef2 | 2014-02-28 10:46:17 +0800 | [diff] [blame] | 1994 | 	btrfs_queue_work(fs_info->scrub_wr_completion_workers, &sbio->work); | 
| Stefan Behrens | ff023aa | 2012-11-06 11:43:11 +0100 | [diff] [blame] | 1995 | } | 
 | 1996 |  | 
 | 1997 | static void scrub_wr_bio_end_io_worker(struct btrfs_work *work) | 
 | 1998 | { | 
 | 1999 | 	struct scrub_bio *sbio = container_of(work, struct scrub_bio, work); | 
 | 2000 | 	struct scrub_ctx *sctx = sbio->sctx; | 
 | 2001 | 	int i; | 
 | 2002 |  | 
 | 2003 | 	WARN_ON(sbio->page_count > SCRUB_PAGES_PER_WR_BIO); | 
 | 2004 | 	if (sbio->err) { | 
 | 2005 | 		struct btrfs_dev_replace *dev_replace = | 
| Jeff Mahoney | fb45625 | 2016-06-22 18:54:56 -0400 | [diff] [blame] | 2006 | 			&sbio->sctx->fs_info->dev_replace; | 
| Stefan Behrens | ff023aa | 2012-11-06 11:43:11 +0100 | [diff] [blame] | 2007 |  | 
 | 2008 | 		for (i = 0; i < sbio->page_count; i++) { | 
 | 2009 | 			struct scrub_page *spage = sbio->pagev[i]; | 
 | 2010 |  | 
 | 2011 | 			spage->io_error = 1; | 
 | 2012 | 			btrfs_dev_replace_stats_inc(&dev_replace-> | 
 | 2013 | 						    num_write_errors); | 
 | 2014 | 		} | 
 | 2015 | 	} | 
 | 2016 |  | 
 | 2017 | 	for (i = 0; i < sbio->page_count; i++) | 
 | 2018 | 		scrub_page_put(sbio->pagev[i]); | 
 | 2019 |  | 
 | 2020 | 	bio_put(sbio->bio); | 
 | 2021 | 	kfree(sbio); | 
 | 2022 | 	scrub_pending_bio_dec(sctx); | 
 | 2023 | } | 
 | 2024 |  | 
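/*
 * Verify the checksum of a freshly read block. Data extents, tree blocks and
 * super blocks each use their own helper; super block errors are only
 * reported via the statistics, while data and tree block errors are handed
 * to scrub_handle_errored_block().
 */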
 | 2025 | static int scrub_checksum(struct scrub_block *sblock) | 
| Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 2026 | { | 
 | 2027 | 	u64 flags; | 
 | 2028 | 	int ret; | 
 | 2029 |  | 
| Zhao Lei | ba7cf98 | 2015-08-24 21:18:02 +0800 | [diff] [blame] | 2030 | 	/* | 
 | 2031 | 	 * No need to initialize these stats currently, because this | 
 | 2032 | 	 * function only uses the return value instead of the stats | 
 | 2033 | 	 * values. | 
 | 2034 | 	 * | 
 | 2035 | 	 * Todo: | 
 | 2036 | 	 * always use stats | 
 | 2037 | 	 */ | 
 | 2038 | 	sblock->header_error = 0; | 
 | 2039 | 	sblock->generation_error = 0; | 
 | 2040 | 	sblock->checksum_error = 0; | 
 | 2041 |  | 
| Stefan Behrens | 7a9e998 | 2012-11-02 14:58:04 +0100 | [diff] [blame] | 2042 | 	WARN_ON(sblock->page_count < 1); | 
 | 2043 | 	flags = sblock->pagev[0]->flags; | 
| Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 2044 | 	ret = 0; | 
 | 2045 | 	if (flags & BTRFS_EXTENT_FLAG_DATA) | 
 | 2046 | 		ret = scrub_checksum_data(sblock); | 
 | 2047 | 	else if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) | 
 | 2048 | 		ret = scrub_checksum_tree_block(sblock); | 
 | 2049 | 	else if (flags & BTRFS_EXTENT_FLAG_SUPER) | 
 | 2050 | 		(void)scrub_checksum_super(sblock); | 
 | 2051 | 	else | 
 | 2052 | 		WARN_ON(1); | 
 | 2053 | 	if (ret) | 
 | 2054 | 		scrub_handle_errored_block(sblock); | 
| Stefan Behrens | ff023aa | 2012-11-06 11:43:11 +0100 | [diff] [blame] | 2055 |  | 
 | 2056 | 	return ret; | 
| Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 2057 | } | 
 | 2058 |  | 
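/*
 * Check the data checksum of a block: the crc32c is computed over one sector
 * of data (spanning pages if needed) and compared against the checksum stored
 * with the first page. Blocks without a known checksum are accepted as-is.
 * Returns non-zero on mismatch.
 */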
 | 2059 | static int scrub_checksum_data(struct scrub_block *sblock) | 
 | 2060 | { | 
| Stefan Behrens | d9d181c | 2012-11-02 09:58:09 +0100 | [diff] [blame] | 2061 | 	struct scrub_ctx *sctx = sblock->sctx; | 
| Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 2062 | 	u8 csum[BTRFS_CSUM_SIZE]; | 
| Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 2063 | 	u8 *on_disk_csum; | 
 | 2064 | 	struct page *page; | 
 | 2065 | 	void *buffer; | 
| Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 2066 | 	u32 crc = ~(u32)0; | 
| Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 2067 | 	u64 len; | 
 | 2068 | 	int index; | 
| Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 2069 |  | 
| Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 2070 | 	BUG_ON(sblock->page_count < 1); | 
| Stefan Behrens | 7a9e998 | 2012-11-02 14:58:04 +0100 | [diff] [blame] | 2071 | 	if (!sblock->pagev[0]->have_csum) | 
| Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 2072 | 		return 0; | 
 | 2073 |  | 
| Stefan Behrens | 7a9e998 | 2012-11-02 14:58:04 +0100 | [diff] [blame] | 2074 | 	on_disk_csum = sblock->pagev[0]->csum; | 
 | 2075 | 	page = sblock->pagev[0]->page; | 
| Linus Torvalds | 9613beb | 2012-03-30 12:44:29 -0700 | [diff] [blame] | 2076 | 	buffer = kmap_atomic(page); | 
| Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 2077 |  | 
| David Sterba | 25cc122 | 2017-05-16 19:10:41 +0200 | [diff] [blame^] | 2078 | 	len = sctx->fs_info->sectorsize; | 
| Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 2079 | 	index = 0; | 
 | 2080 | 	for (;;) { | 
 | 2081 | 		u64 l = min_t(u64, len, PAGE_SIZE); | 
 | 2082 |  | 
| Liu Bo | b049668 | 2013-03-14 14:57:45 +0000 | [diff] [blame] | 2083 | 		crc = btrfs_csum_data(buffer, crc, l); | 
| Linus Torvalds | 9613beb | 2012-03-30 12:44:29 -0700 | [diff] [blame] | 2084 | 		kunmap_atomic(buffer); | 
| Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 2085 | 		len -= l; | 
 | 2086 | 		if (len == 0) | 
 | 2087 | 			break; | 
 | 2088 | 		index++; | 
 | 2089 | 		BUG_ON(index >= sblock->page_count); | 
| Stefan Behrens | 7a9e998 | 2012-11-02 14:58:04 +0100 | [diff] [blame] | 2090 | 		BUG_ON(!sblock->pagev[index]->page); | 
 | 2091 | 		page = sblock->pagev[index]->page; | 
| Linus Torvalds | 9613beb | 2012-03-30 12:44:29 -0700 | [diff] [blame] | 2092 | 		buffer = kmap_atomic(page); | 
| Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 2093 | 	} | 
 | 2094 |  | 
| Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 2095 | 	btrfs_csum_final(crc, csum); | 
| Stefan Behrens | d9d181c | 2012-11-02 09:58:09 +0100 | [diff] [blame] | 2096 | 	if (memcmp(csum, on_disk_csum, sctx->csum_size)) | 
| Zhao Lei | ba7cf98 | 2015-08-24 21:18:02 +0800 | [diff] [blame] | 2097 | 		sblock->checksum_error = 1; | 
| Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 2098 |  | 
| Zhao Lei | ba7cf98 | 2015-08-24 21:18:02 +0800 | [diff] [blame] | 2099 | 	return sblock->checksum_error; | 
| Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 2100 | } | 
 | 2101 |  | 
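/*
 * Check a tree block: verify bytenr, generation, fsid and chunk tree uuid in
 * the header, then the checksum over the node contents (excluding the
 * checksum field itself). Returns non-zero if a header or checksum error was
 * found.
 */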
| Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 2102 | static int scrub_checksum_tree_block(struct scrub_block *sblock) | 
| Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 2103 | { | 
| Stefan Behrens | d9d181c | 2012-11-02 09:58:09 +0100 | [diff] [blame] | 2104 | 	struct scrub_ctx *sctx = sblock->sctx; | 
| Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 2105 | 	struct btrfs_header *h; | 
| Jeff Mahoney | 0b246af | 2016-06-22 18:54:23 -0400 | [diff] [blame] | 2106 | 	struct btrfs_fs_info *fs_info = sctx->fs_info; | 
| Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 2107 | 	u8 calculated_csum[BTRFS_CSUM_SIZE]; | 
 | 2108 | 	u8 on_disk_csum[BTRFS_CSUM_SIZE]; | 
 | 2109 | 	struct page *page; | 
 | 2110 | 	void *mapped_buffer; | 
 | 2111 | 	u64 mapped_size; | 
 | 2112 | 	void *p; | 
| Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 2113 | 	u32 crc = ~(u32)0; | 
| Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 2114 | 	u64 len; | 
 | 2115 | 	int index; | 
 | 2116 |  | 
 | 2117 | 	BUG_ON(sblock->page_count < 1); | 
| Stefan Behrens | 7a9e998 | 2012-11-02 14:58:04 +0100 | [diff] [blame] | 2118 | 	page = sblock->pagev[0]->page; | 
| Linus Torvalds | 9613beb | 2012-03-30 12:44:29 -0700 | [diff] [blame] | 2119 | 	mapped_buffer = kmap_atomic(page); | 
| Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 2120 | 	h = (struct btrfs_header *)mapped_buffer; | 
| Stefan Behrens | d9d181c | 2012-11-02 09:58:09 +0100 | [diff] [blame] | 2121 | 	memcpy(on_disk_csum, h->csum, sctx->csum_size); | 
| Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 2122 |  | 
 | 2123 | 	/* | 
 | 2124 | 	 * we don't use the getter functions here, as we | 
 | 2125 | 	 * a) don't have an extent buffer and | 
 | 2126 | 	 * b) the page is already kmapped | 
 | 2127 | 	 */ | 
| Qu Wenruo | 3cae210 | 2013-07-16 11:19:18 +0800 | [diff] [blame] | 2128 | 	if (sblock->pagev[0]->logical != btrfs_stack_header_bytenr(h)) | 
| Zhao Lei | ba7cf98 | 2015-08-24 21:18:02 +0800 | [diff] [blame] | 2129 | 		sblock->header_error = 1; | 
| Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 2130 |  | 
| Zhao Lei | ba7cf98 | 2015-08-24 21:18:02 +0800 | [diff] [blame] | 2131 | 	if (sblock->pagev[0]->generation != btrfs_stack_header_generation(h)) { | 
 | 2132 | 		sblock->header_error = 1; | 
 | 2133 | 		sblock->generation_error = 1; | 
 | 2134 | 	} | 
| Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 2135 |  | 
| Miao Xie | 17a9be2 | 2014-07-24 11:37:08 +0800 | [diff] [blame] | 2136 | 	if (!scrub_check_fsid(h->fsid, sblock->pagev[0])) | 
| Zhao Lei | ba7cf98 | 2015-08-24 21:18:02 +0800 | [diff] [blame] | 2137 | 		sblock->header_error = 1; | 
| Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 2138 |  | 
 | 2139 | 	if (memcmp(h->chunk_tree_uuid, fs_info->chunk_tree_uuid, | 
 | 2140 | 		   BTRFS_UUID_SIZE)) | 
| Zhao Lei | ba7cf98 | 2015-08-24 21:18:02 +0800 | [diff] [blame] | 2141 | 		sblock->header_error = 1; | 
| Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 2142 |  | 
| David Sterba | 25cc122 | 2017-05-16 19:10:41 +0200 | [diff] [blame^] | 2143 | 	len = sctx->fs_info->nodesize - BTRFS_CSUM_SIZE; | 
| Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 2144 | 	mapped_size = PAGE_SIZE - BTRFS_CSUM_SIZE; | 
 | 2145 | 	p = ((u8 *)mapped_buffer) + BTRFS_CSUM_SIZE; | 
 | 2146 | 	index = 0; | 
 | 2147 | 	for (;;) { | 
 | 2148 | 		u64 l = min_t(u64, len, mapped_size); | 
 | 2149 |  | 
| Liu Bo | b049668 | 2013-03-14 14:57:45 +0000 | [diff] [blame] | 2150 | 		crc = btrfs_csum_data(p, crc, l); | 
| Linus Torvalds | 9613beb | 2012-03-30 12:44:29 -0700 | [diff] [blame] | 2151 | 		kunmap_atomic(mapped_buffer); | 
| Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 2152 | 		len -= l; | 
 | 2153 | 		if (len == 0) | 
 | 2154 | 			break; | 
 | 2155 | 		index++; | 
 | 2156 | 		BUG_ON(index >= sblock->page_count); | 
| Stefan Behrens | 7a9e998 | 2012-11-02 14:58:04 +0100 | [diff] [blame] | 2157 | 		BUG_ON(!sblock->pagev[index]->page); | 
 | 2158 | 		page = sblock->pagev[index]->page; | 
| Linus Torvalds | 9613beb | 2012-03-30 12:44:29 -0700 | [diff] [blame] | 2159 | 		mapped_buffer = kmap_atomic(page); | 
| Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 2160 | 		mapped_size = PAGE_SIZE; | 
 | 2161 | 		p = mapped_buffer; | 
 | 2162 | 	} | 
 | 2163 |  | 
 | 2164 | 	btrfs_csum_final(crc, calculated_csum); | 
| Stefan Behrens | d9d181c | 2012-11-02 09:58:09 +0100 | [diff] [blame] | 2165 | 	if (memcmp(calculated_csum, on_disk_csum, sctx->csum_size)) | 
| Zhao Lei | ba7cf98 | 2015-08-24 21:18:02 +0800 | [diff] [blame] | 2166 | 		sblock->checksum_error = 1; | 
| Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 2167 |  | 
| Zhao Lei | ba7cf98 | 2015-08-24 21:18:02 +0800 | [diff] [blame] | 2168 | 	return sblock->header_error || sblock->checksum_error; | 
| Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 2169 | } | 
 | 2170 |  | 
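/*
 * Check a super block copy: a wrong bytenr, fsid or checksum counts as a
 * corruption error, a mismatching generation as a generation error. Errors
 * are only recorded in the statistics, since the super block is rewritten
 * with the next transaction commit anyway.
 */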
| Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 2171 | static int scrub_checksum_super(struct scrub_block *sblock) | 
| Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 2172 | { | 
 | 2173 | 	struct btrfs_super_block *s; | 
| Stefan Behrens | d9d181c | 2012-11-02 09:58:09 +0100 | [diff] [blame] | 2174 | 	struct scrub_ctx *sctx = sblock->sctx; | 
| Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 2175 | 	u8 calculated_csum[BTRFS_CSUM_SIZE]; | 
 | 2176 | 	u8 on_disk_csum[BTRFS_CSUM_SIZE]; | 
 | 2177 | 	struct page *page; | 
 | 2178 | 	void *mapped_buffer; | 
 | 2179 | 	u64 mapped_size; | 
 | 2180 | 	void *p; | 
| Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 2181 | 	u32 crc = ~(u32)0; | 
| Stefan Behrens | 442a4f6 | 2012-05-25 16:06:08 +0200 | [diff] [blame] | 2182 | 	int fail_gen = 0; | 
 | 2183 | 	int fail_cor = 0; | 
| Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 2184 | 	u64 len; | 
 | 2185 | 	int index; | 
| Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 2186 |  | 
| Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 2187 | 	BUG_ON(sblock->page_count < 1); | 
| Stefan Behrens | 7a9e998 | 2012-11-02 14:58:04 +0100 | [diff] [blame] | 2188 | 	page = sblock->pagev[0]->page; | 
| Linus Torvalds | 9613beb | 2012-03-30 12:44:29 -0700 | [diff] [blame] | 2189 | 	mapped_buffer = kmap_atomic(page); | 
| Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 2190 | 	s = (struct btrfs_super_block *)mapped_buffer; | 
| Stefan Behrens | d9d181c | 2012-11-02 09:58:09 +0100 | [diff] [blame] | 2191 | 	memcpy(on_disk_csum, s->csum, sctx->csum_size); | 
| Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 2192 |  | 
| Qu Wenruo | 3cae210 | 2013-07-16 11:19:18 +0800 | [diff] [blame] | 2193 | 	if (sblock->pagev[0]->logical != btrfs_super_bytenr(s)) | 
| Stefan Behrens | 442a4f6 | 2012-05-25 16:06:08 +0200 | [diff] [blame] | 2194 | 		++fail_cor; | 
| Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 2195 |  | 
| Qu Wenruo | 3cae210 | 2013-07-16 11:19:18 +0800 | [diff] [blame] | 2196 | 	if (sblock->pagev[0]->generation != btrfs_super_generation(s)) | 
| Stefan Behrens | 442a4f6 | 2012-05-25 16:06:08 +0200 | [diff] [blame] | 2197 | 		++fail_gen; | 
| Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 2198 |  | 
| Miao Xie | 17a9be2 | 2014-07-24 11:37:08 +0800 | [diff] [blame] | 2199 | 	if (!scrub_check_fsid(s->fsid, sblock->pagev[0])) | 
| Stefan Behrens | 442a4f6 | 2012-05-25 16:06:08 +0200 | [diff] [blame] | 2200 | 		++fail_cor; | 
| Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 2201 |  | 
| Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 2202 | 	len = BTRFS_SUPER_INFO_SIZE - BTRFS_CSUM_SIZE; | 
 | 2203 | 	mapped_size = PAGE_SIZE - BTRFS_CSUM_SIZE; | 
 | 2204 | 	p = ((u8 *)mapped_buffer) + BTRFS_CSUM_SIZE; | 
 | 2205 | 	index = 0; | 
 | 2206 | 	for (;;) { | 
 | 2207 | 		u64 l = min_t(u64, len, mapped_size); | 
 | 2208 |  | 
| Liu Bo | b049668 | 2013-03-14 14:57:45 +0000 | [diff] [blame] | 2209 | 		crc = btrfs_csum_data(p, crc, l); | 
| Linus Torvalds | 9613beb | 2012-03-30 12:44:29 -0700 | [diff] [blame] | 2210 | 		kunmap_atomic(mapped_buffer); | 
| Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 2211 | 		len -= l; | 
 | 2212 | 		if (len == 0) | 
 | 2213 | 			break; | 
 | 2214 | 		index++; | 
 | 2215 | 		BUG_ON(index >= sblock->page_count); | 
| Stefan Behrens | 7a9e998 | 2012-11-02 14:58:04 +0100 | [diff] [blame] | 2216 | 		BUG_ON(!sblock->pagev[index]->page); | 
 | 2217 | 		page = sblock->pagev[index]->page; | 
| Linus Torvalds | 9613beb | 2012-03-30 12:44:29 -0700 | [diff] [blame] | 2218 | 		mapped_buffer = kmap_atomic(page); | 
| Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 2219 | 		mapped_size = PAGE_SIZE; | 
 | 2220 | 		p = mapped_buffer; | 
 | 2221 | 	} | 
 | 2222 |  | 
 | 2223 | 	btrfs_csum_final(crc, calculated_csum); | 
| Stefan Behrens | d9d181c | 2012-11-02 09:58:09 +0100 | [diff] [blame] | 2224 | 	if (memcmp(calculated_csum, on_disk_csum, sctx->csum_size)) | 
| Stefan Behrens | 442a4f6 | 2012-05-25 16:06:08 +0200 | [diff] [blame] | 2225 | 		++fail_cor; | 
| Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 2226 |  | 
| Stefan Behrens | 442a4f6 | 2012-05-25 16:06:08 +0200 | [diff] [blame] | 2227 | 	if (fail_cor + fail_gen) { | 
| Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 2228 | 		/* | 
 | 2229 | 		 * If we find an error in a super block, we just report it; | 
 | 2230 | 		 * the super blocks get rewritten with the next transaction | 
 | 2231 | 		 * commit anyway. | 
 | 2232 | 		 */ | 
| Stefan Behrens | d9d181c | 2012-11-02 09:58:09 +0100 | [diff] [blame] | 2233 | 		spin_lock(&sctx->stat_lock); | 
 | 2234 | 		++sctx->stat.super_errors; | 
 | 2235 | 		spin_unlock(&sctx->stat_lock); | 
| Stefan Behrens | 442a4f6 | 2012-05-25 16:06:08 +0200 | [diff] [blame] | 2236 | 		if (fail_cor) | 
| Stefan Behrens | 7a9e998 | 2012-11-02 14:58:04 +0100 | [diff] [blame] | 2237 | 			btrfs_dev_stat_inc_and_print(sblock->pagev[0]->dev, | 
| Stefan Behrens | 442a4f6 | 2012-05-25 16:06:08 +0200 | [diff] [blame] | 2238 | 				BTRFS_DEV_STAT_CORRUPTION_ERRS); | 
 | 2239 | 		else | 
| Stefan Behrens | 7a9e998 | 2012-11-02 14:58:04 +0100 | [diff] [blame] | 2240 | 			btrfs_dev_stat_inc_and_print(sblock->pagev[0]->dev, | 
| Stefan Behrens | 442a4f6 | 2012-05-25 16:06:08 +0200 | [diff] [blame] | 2241 | 				BTRFS_DEV_STAT_GENERATION_ERRS); | 
| Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 2242 | 	} | 
 | 2243 |  | 
| Stefan Behrens | 442a4f6 | 2012-05-25 16:06:08 +0200 | [diff] [blame] | 2244 | 	return fail_cor + fail_gen; | 
| Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 2245 | } | 
 | 2246 |  | 
| Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 2247 | static void scrub_block_get(struct scrub_block *sblock) | 
 | 2248 | { | 
| Elena Reshetova | 186debd | 2017-03-03 10:55:23 +0200 | [diff] [blame] | 2249 | 	refcount_inc(&sblock->refs); | 
| Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 2250 | } | 
 | 2251 |  | 
 | 2252 | static void scrub_block_put(struct scrub_block *sblock) | 
 | 2253 | { | 
| Elena Reshetova | 186debd | 2017-03-03 10:55:23 +0200 | [diff] [blame] | 2254 | 	if (refcount_dec_and_test(&sblock->refs)) { | 
| Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 2255 | 		int i; | 
 | 2256 |  | 
| Miao Xie | 5a6ac9e | 2014-11-06 17:20:58 +0800 | [diff] [blame] | 2257 | 		if (sblock->sparity) | 
 | 2258 | 			scrub_parity_put(sblock->sparity); | 
 | 2259 |  | 
| Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 2260 | 		for (i = 0; i < sblock->page_count; i++) | 
| Stefan Behrens | 7a9e998 | 2012-11-02 14:58:04 +0100 | [diff] [blame] | 2261 | 			scrub_page_put(sblock->pagev[i]); | 
| Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 2262 | 		kfree(sblock); | 
 | 2263 | 	} | 
 | 2264 | } | 
 | 2265 |  | 
| Stefan Behrens | 7a9e998 | 2012-11-02 14:58:04 +0100 | [diff] [blame] | 2266 | static void scrub_page_get(struct scrub_page *spage) | 
 | 2267 | { | 
| Zhao Lei | 5701934 | 2015-01-20 15:11:45 +0800 | [diff] [blame] | 2268 | 	atomic_inc(&spage->refs); | 
| Stefan Behrens | 7a9e998 | 2012-11-02 14:58:04 +0100 | [diff] [blame] | 2269 | } | 
 | 2270 |  | 
 | 2271 | static void scrub_page_put(struct scrub_page *spage) | 
 | 2272 | { | 
| Zhao Lei | 5701934 | 2015-01-20 15:11:45 +0800 | [diff] [blame] | 2273 | 	if (atomic_dec_and_test(&spage->refs)) { | 
| Stefan Behrens | 7a9e998 | 2012-11-02 14:58:04 +0100 | [diff] [blame] | 2274 | 		if (spage->page) | 
 | 2275 | 			__free_page(spage->page); | 
 | 2276 | 		kfree(spage); | 
 | 2277 | 	} | 
 | 2278 | } | 
 | 2279 |  | 
| Stefan Behrens | d9d181c | 2012-11-02 09:58:09 +0100 | [diff] [blame] | 2280 | static void scrub_submit(struct scrub_ctx *sctx) | 
| Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 2281 | { | 
 | 2282 | 	struct scrub_bio *sbio; | 
 | 2283 |  | 
| Stefan Behrens | d9d181c | 2012-11-02 09:58:09 +0100 | [diff] [blame] | 2284 | 	if (sctx->curr == -1) | 
| Stefan Behrens | 1623ede | 2012-03-27 14:21:26 -0400 | [diff] [blame] | 2285 | 		return; | 
| Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 2286 |  | 
| Stefan Behrens | d9d181c | 2012-11-02 09:58:09 +0100 | [diff] [blame] | 2287 | 	sbio = sctx->bios[sctx->curr]; | 
 | 2288 | 	sctx->curr = -1; | 
| Stefan Behrens | b6bfebc | 2012-11-02 16:44:58 +0100 | [diff] [blame] | 2289 | 	scrub_pending_bio_inc(sctx); | 
| Mike Christie | 4e49ea4 | 2016-06-05 14:31:41 -0500 | [diff] [blame] | 2290 | 	btrfsic_submit_bio(sbio->bio); | 
| Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 2291 | } | 
 | 2292 |  | 
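/*
 * Queue a page for reading. As on the write side, pages are packed into the
 * current read bio while they are physically and logically contiguous on the
 * same device; otherwise the bio is submitted and the page is retried with a
 * fresh one.
 */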
| Stefan Behrens | ff023aa | 2012-11-06 11:43:11 +0100 | [diff] [blame] | 2293 | static int scrub_add_page_to_rd_bio(struct scrub_ctx *sctx, | 
 | 2294 | 				    struct scrub_page *spage) | 
| Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 2295 | { | 
| Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 2296 | 	struct scrub_block *sblock = spage->sblock; | 
| Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 2297 | 	struct scrub_bio *sbio; | 
| Arne Jansen | 69f4cb5 | 2011-11-11 08:17:10 -0500 | [diff] [blame] | 2298 | 	int ret; | 
| Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 2299 |  | 
 | 2300 | again: | 
 | 2301 | 	/* | 
 | 2302 | 	 * grab a fresh bio or wait for one to become available | 
 | 2303 | 	 */ | 
| Stefan Behrens | d9d181c | 2012-11-02 09:58:09 +0100 | [diff] [blame] | 2304 | 	while (sctx->curr == -1) { | 
 | 2305 | 		spin_lock(&sctx->list_lock); | 
 | 2306 | 		sctx->curr = sctx->first_free; | 
 | 2307 | 		if (sctx->curr != -1) { | 
 | 2308 | 			sctx->first_free = sctx->bios[sctx->curr]->next_free; | 
 | 2309 | 			sctx->bios[sctx->curr]->next_free = -1; | 
 | 2310 | 			sctx->bios[sctx->curr]->page_count = 0; | 
 | 2311 | 			spin_unlock(&sctx->list_lock); | 
| Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 2312 | 		} else { | 
| Stefan Behrens | d9d181c | 2012-11-02 09:58:09 +0100 | [diff] [blame] | 2313 | 			spin_unlock(&sctx->list_lock); | 
 | 2314 | 			wait_event(sctx->list_wait, sctx->first_free != -1); | 
| Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 2315 | 		} | 
 | 2316 | 	} | 
| Stefan Behrens | d9d181c | 2012-11-02 09:58:09 +0100 | [diff] [blame] | 2317 | 	sbio = sctx->bios[sctx->curr]; | 
| Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 2318 | 	if (sbio->page_count == 0) { | 
| Arne Jansen | 69f4cb5 | 2011-11-11 08:17:10 -0500 | [diff] [blame] | 2319 | 		struct bio *bio; | 
 | 2320 |  | 
| Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 2321 | 		sbio->physical = spage->physical; | 
 | 2322 | 		sbio->logical = spage->logical; | 
| Stefan Behrens | a36cf8b | 2012-11-02 13:26:57 +0100 | [diff] [blame] | 2323 | 		sbio->dev = spage->dev; | 
| Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 2324 | 		bio = sbio->bio; | 
 | 2325 | 		if (!bio) { | 
| David Sterba | 58c4e17 | 2016-02-11 10:49:42 +0100 | [diff] [blame] | 2326 | 			bio = btrfs_io_bio_alloc(GFP_KERNEL, | 
 | 2327 | 					sctx->pages_per_rd_bio); | 
| Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 2328 | 			if (!bio) | 
 | 2329 | 				return -ENOMEM; | 
 | 2330 | 			sbio->bio = bio; | 
 | 2331 | 		} | 
| Arne Jansen | 69f4cb5 | 2011-11-11 08:17:10 -0500 | [diff] [blame] | 2332 |  | 
 | 2333 | 		bio->bi_private = sbio; | 
 | 2334 | 		bio->bi_end_io = scrub_bio_end_io; | 
| Stefan Behrens | a36cf8b | 2012-11-02 13:26:57 +0100 | [diff] [blame] | 2335 | 		bio->bi_bdev = sbio->dev->bdev; | 
| Kent Overstreet | 4f024f3 | 2013-10-11 15:44:27 -0700 | [diff] [blame] | 2336 | 		bio->bi_iter.bi_sector = sbio->physical >> 9; | 
| Mike Christie | 37226b2 | 2016-06-05 14:31:52 -0500 | [diff] [blame] | 2337 | 		bio_set_op_attrs(bio, REQ_OP_READ, 0); | 
| Arne Jansen | 69f4cb5 | 2011-11-11 08:17:10 -0500 | [diff] [blame] | 2338 | 		sbio->err = 0; | 
| Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 2339 | 	} else if (sbio->physical + sbio->page_count * PAGE_SIZE != | 
 | 2340 | 		   spage->physical || | 
 | 2341 | 		   sbio->logical + sbio->page_count * PAGE_SIZE != | 
| Stefan Behrens | a36cf8b | 2012-11-02 13:26:57 +0100 | [diff] [blame] | 2342 | 		   spage->logical || | 
 | 2343 | 		   sbio->dev != spage->dev) { | 
| Stefan Behrens | d9d181c | 2012-11-02 09:58:09 +0100 | [diff] [blame] | 2344 | 		scrub_submit(sctx); | 
| Arne Jansen | 69f4cb5 | 2011-11-11 08:17:10 -0500 | [diff] [blame] | 2345 | 		goto again; | 
 | 2346 | 	} | 
 | 2347 |  | 
| Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 2348 | 	sbio->pagev[sbio->page_count] = spage; | 
 | 2349 | 	ret = bio_add_page(sbio->bio, spage->page, PAGE_SIZE, 0); | 
 | 2350 | 	if (ret != PAGE_SIZE) { | 
 | 2351 | 		if (sbio->page_count < 1) { | 
 | 2352 | 			bio_put(sbio->bio); | 
 | 2353 | 			sbio->bio = NULL; | 
 | 2354 | 			return -EIO; | 
 | 2355 | 		} | 
| Stefan Behrens | d9d181c | 2012-11-02 09:58:09 +0100 | [diff] [blame] | 2356 | 		scrub_submit(sctx); | 
| Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 2357 | 		goto again; | 
| Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 2358 | 	} | 
| Arne Jansen | 1bc8779 | 2011-05-28 21:57:55 +0200 | [diff] [blame] | 2359 |  | 
| Stefan Behrens | ff023aa | 2012-11-06 11:43:11 +0100 | [diff] [blame] | 2360 | 	scrub_block_get(sblock); /* one for the page added to the bio */ | 
| Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 2361 | 	atomic_inc(&sblock->outstanding_pages); | 
 | 2362 | 	sbio->page_count++; | 
| Stefan Behrens | ff023aa | 2012-11-06 11:43:11 +0100 | [diff] [blame] | 2363 | 	if (sbio->page_count == sctx->pages_per_rd_bio) | 
| Stefan Behrens | d9d181c | 2012-11-02 09:58:09 +0100 | [diff] [blame] | 2364 | 		scrub_submit(sctx); | 
| Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 2365 |  | 
 | 2366 | 	return 0; | 
 | 2367 | } | 
 | 2368 |  | 
| Linus Torvalds | 2236597 | 2015-09-05 15:14:43 -0700 | [diff] [blame] | 2369 | static void scrub_missing_raid56_end_io(struct bio *bio) | 
| Omar Sandoval | 73ff61d | 2015-06-19 11:52:51 -0700 | [diff] [blame] | 2370 | { | 
 | 2371 | 	struct scrub_block *sblock = bio->bi_private; | 
| Jeff Mahoney | fb45625 | 2016-06-22 18:54:56 -0400 | [diff] [blame] | 2372 | 	struct btrfs_fs_info *fs_info = sblock->sctx->fs_info; | 
| Omar Sandoval | 73ff61d | 2015-06-19 11:52:51 -0700 | [diff] [blame] | 2373 |  | 
| Linus Torvalds | 2236597 | 2015-09-05 15:14:43 -0700 | [diff] [blame] | 2374 | 	if (bio->bi_error) | 
| Omar Sandoval | 73ff61d | 2015-06-19 11:52:51 -0700 | [diff] [blame] | 2375 | 		sblock->no_io_error_seen = 0; | 
 | 2376 |  | 
| Scott Talbert | 4673272 | 2016-05-09 09:14:28 -0400 | [diff] [blame] | 2377 | 	bio_put(bio); | 
 | 2378 |  | 
| Omar Sandoval | 73ff61d | 2015-06-19 11:52:51 -0700 | [diff] [blame] | 2379 | 	btrfs_queue_work(fs_info->scrub_workers, &sblock->work); | 
 | 2380 | } | 
 | 2381 |  | 
 | 2382 | static void scrub_missing_raid56_worker(struct btrfs_work *work) | 
 | 2383 | { | 
 | 2384 | 	struct scrub_block *sblock = container_of(work, struct scrub_block, work); | 
 | 2385 | 	struct scrub_ctx *sctx = sblock->sctx; | 
| Jeff Mahoney | 0b246af | 2016-06-22 18:54:23 -0400 | [diff] [blame] | 2386 | 	struct btrfs_fs_info *fs_info = sctx->fs_info; | 
| Omar Sandoval | 73ff61d | 2015-06-19 11:52:51 -0700 | [diff] [blame] | 2387 | 	u64 logical; | 
 | 2388 | 	struct btrfs_device *dev; | 
 | 2389 |  | 
| Omar Sandoval | 73ff61d | 2015-06-19 11:52:51 -0700 | [diff] [blame] | 2390 | 	logical = sblock->pagev[0]->logical; | 
 | 2391 | 	dev = sblock->pagev[0]->dev; | 
 | 2392 |  | 
| Zhao Lei | affe4a5 | 2015-08-24 21:32:06 +0800 | [diff] [blame] | 2393 | 	if (sblock->no_io_error_seen) | 
| Zhao Lei | ba7cf98 | 2015-08-24 21:18:02 +0800 | [diff] [blame] | 2394 | 		scrub_recheck_block_checksum(sblock); | 
| Omar Sandoval | 73ff61d | 2015-06-19 11:52:51 -0700 | [diff] [blame] | 2395 |  | 
 | 2396 | 	if (!sblock->no_io_error_seen) { | 
 | 2397 | 		spin_lock(&sctx->stat_lock); | 
 | 2398 | 		sctx->stat.read_errors++; | 
 | 2399 | 		spin_unlock(&sctx->stat_lock); | 
| Jeff Mahoney | 0b246af | 2016-06-22 18:54:23 -0400 | [diff] [blame] | 2400 | 		btrfs_err_rl_in_rcu(fs_info, | 
| David Sterba | b14af3b | 2015-10-08 10:43:10 +0200 | [diff] [blame] | 2401 | 			"IO error rebuilding logical %llu for dev %s", | 
| Omar Sandoval | 73ff61d | 2015-06-19 11:52:51 -0700 | [diff] [blame] | 2402 | 			logical, rcu_str_deref(dev->name)); | 
 | 2403 | 	} else if (sblock->header_error || sblock->checksum_error) { | 
 | 2404 | 		spin_lock(&sctx->stat_lock); | 
 | 2405 | 		sctx->stat.uncorrectable_errors++; | 
 | 2406 | 		spin_unlock(&sctx->stat_lock); | 
| Jeff Mahoney | 0b246af | 2016-06-22 18:54:23 -0400 | [diff] [blame] | 2407 | 		btrfs_err_rl_in_rcu(fs_info, | 
| David Sterba | b14af3b | 2015-10-08 10:43:10 +0200 | [diff] [blame] | 2408 | 			"failed to rebuild valid logical %llu for dev %s", | 
| Omar Sandoval | 73ff61d | 2015-06-19 11:52:51 -0700 | [diff] [blame] | 2409 | 			logical, rcu_str_deref(dev->name)); | 
 | 2410 | 	} else { | 
 | 2411 | 		scrub_write_block_to_dev_replace(sblock); | 
 | 2412 | 	} | 
 | 2413 |  | 
 | 2414 | 	scrub_block_put(sblock); | 
 | 2415 |  | 
 | 2416 | 	if (sctx->is_dev_replace && | 
 | 2417 | 	    atomic_read(&sctx->wr_ctx.flush_all_writes)) { | 
 | 2418 | 		mutex_lock(&sctx->wr_ctx.wr_lock); | 
 | 2419 | 		scrub_wr_submit(sctx); | 
 | 2420 | 		mutex_unlock(&sctx->wr_ctx.wr_lock); | 
 | 2421 | 	} | 
 | 2422 |  | 
 | 2423 | 	scrub_pending_bio_dec(sctx); | 
 | 2424 | } | 
 | 2425 |  | 
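/*
 * Read a block that sits on a missing device by rebuilding it from the other
 * RAID5/6 stripes. The rebuilt pages are rechecked and, if good, written to
 * the dev-replace target by scrub_missing_raid56_worker().
 */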
 | 2426 | static void scrub_missing_raid56_pages(struct scrub_block *sblock) | 
 | 2427 | { | 
 | 2428 | 	struct scrub_ctx *sctx = sblock->sctx; | 
| Jeff Mahoney | fb45625 | 2016-06-22 18:54:56 -0400 | [diff] [blame] | 2429 | 	struct btrfs_fs_info *fs_info = sctx->fs_info; | 
| Omar Sandoval | 73ff61d | 2015-06-19 11:52:51 -0700 | [diff] [blame] | 2430 | 	u64 length = sblock->page_count * PAGE_SIZE; | 
 | 2431 | 	u64 logical = sblock->pagev[0]->logical; | 
| Zhao Lei | f1fee65 | 2016-05-17 17:37:38 +0800 | [diff] [blame] | 2432 | 	struct btrfs_bio *bbio = NULL; | 
| Omar Sandoval | 73ff61d | 2015-06-19 11:52:51 -0700 | [diff] [blame] | 2433 | 	struct bio *bio; | 
 | 2434 | 	struct btrfs_raid_bio *rbio; | 
 | 2435 | 	int ret; | 
 | 2436 | 	int i; | 
 | 2437 |  | 
| Qu Wenruo | ae6529c | 2017-03-29 09:33:21 +0800 | [diff] [blame] | 2438 | 	btrfs_bio_counter_inc_blocked(fs_info); | 
| Christoph Hellwig | cf8cddd | 2016-10-27 09:27:36 +0200 | [diff] [blame] | 2439 | 	ret = btrfs_map_sblock(fs_info, BTRFS_MAP_GET_READ_MIRRORS, logical, | 
| David Sterba | 825ad4c | 2017-03-28 14:45:22 +0200 | [diff] [blame] | 2440 | 			&length, &bbio); | 
| Omar Sandoval | 73ff61d | 2015-06-19 11:52:51 -0700 | [diff] [blame] | 2441 | 	if (ret || !bbio || !bbio->raid_map) | 
 | 2442 | 		goto bbio_out; | 
 | 2443 |  | 
 | 2444 | 	if (WARN_ON(!sctx->is_dev_replace || | 
 | 2445 | 		    !(bbio->map_type & BTRFS_BLOCK_GROUP_RAID56_MASK))) { | 
 | 2446 | 		/* | 
 | 2447 | 		 * We shouldn't be scrubbing a missing device. Even for dev | 
 | 2448 | 		 * replace, we should only get here for RAID 5/6. We either | 
 | 2449 | 		 * managed to mount something with no mirrors remaining or | 
 | 2450 | 		 * there's a bug in scrub_remap_extent()/btrfs_map_block(). | 
 | 2451 | 		 */ | 
 | 2452 | 		goto bbio_out; | 
 | 2453 | 	} | 
 | 2454 |  | 
 | 2455 | 	bio = btrfs_io_bio_alloc(GFP_NOFS, 0); | 
 | 2456 | 	if (!bio) | 
 | 2457 | 		goto bbio_out; | 
 | 2458 |  | 
 | 2459 | 	bio->bi_iter.bi_sector = logical >> 9; | 
 | 2460 | 	bio->bi_private = sblock; | 
 | 2461 | 	bio->bi_end_io = scrub_missing_raid56_end_io; | 
 | 2462 |  | 
| Jeff Mahoney | 2ff7e61 | 2016-06-22 18:54:24 -0400 | [diff] [blame] | 2463 | 	rbio = raid56_alloc_missing_rbio(fs_info, bio, bbio, length); | 
| Omar Sandoval | 73ff61d | 2015-06-19 11:52:51 -0700 | [diff] [blame] | 2464 | 	if (!rbio) | 
 | 2465 | 		goto rbio_out; | 
 | 2466 |  | 
 | 2467 | 	for (i = 0; i < sblock->page_count; i++) { | 
 | 2468 | 		struct scrub_page *spage = sblock->pagev[i]; | 
 | 2469 |  | 
 | 2470 | 		raid56_add_scrub_pages(rbio, spage->page, spage->logical); | 
 | 2471 | 	} | 
 | 2472 |  | 
 | 2473 | 	btrfs_init_work(&sblock->work, btrfs_scrub_helper, | 
 | 2474 | 			scrub_missing_raid56_worker, NULL, NULL); | 
 | 2475 | 	scrub_block_get(sblock); | 
 | 2476 | 	scrub_pending_bio_inc(sctx); | 
 | 2477 | 	raid56_submit_missing_rbio(rbio); | 
 | 2478 | 	return; | 
 | 2479 |  | 
 | 2480 | rbio_out: | 
 | 2481 | 	bio_put(bio); | 
 | 2482 | bbio_out: | 
| Qu Wenruo | ae6529c | 2017-03-29 09:33:21 +0800 | [diff] [blame] | 2483 | 	btrfs_bio_counter_dec(fs_info); | 
| Omar Sandoval | 73ff61d | 2015-06-19 11:52:51 -0700 | [diff] [blame] | 2484 | 	btrfs_put_bbio(bbio); | 
 | 2485 | 	spin_lock(&sctx->stat_lock); | 
 | 2486 | 	sctx->stat.malloc_errors++; | 
 | 2487 | 	spin_unlock(&sctx->stat_lock); | 
 | 2488 | } | 
 | 2489 |  | 
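/*
 * Create a scrub_block covering [logical, logical + len), allocate one
 * scrub_page per PAGE_SIZE chunk and queue the pages for reading (or, on a
 * missing RAID5/6 device, for rebuilding). With @force set, the read bio is
 * submitted immediately.
 */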
| Stefan Behrens | d9d181c | 2012-11-02 09:58:09 +0100 | [diff] [blame] | 2490 | static int scrub_pages(struct scrub_ctx *sctx, u64 logical, u64 len, | 
| Stefan Behrens | a36cf8b | 2012-11-02 13:26:57 +0100 | [diff] [blame] | 2491 | 		       u64 physical, struct btrfs_device *dev, u64 flags, | 
| Stefan Behrens | ff023aa | 2012-11-06 11:43:11 +0100 | [diff] [blame] | 2492 | 		       u64 gen, int mirror_num, u8 *csum, int force, | 
 | 2493 | 		       u64 physical_for_dev_replace) | 
| Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 2494 | { | 
 | 2495 | 	struct scrub_block *sblock; | 
 | 2496 | 	int index; | 
 | 2497 |  | 
| David Sterba | 58c4e17 | 2016-02-11 10:49:42 +0100 | [diff] [blame] | 2498 | 	sblock = kzalloc(sizeof(*sblock), GFP_KERNEL); | 
| Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 2499 | 	if (!sblock) { | 
| Stefan Behrens | d9d181c | 2012-11-02 09:58:09 +0100 | [diff] [blame] | 2500 | 		spin_lock(&sctx->stat_lock); | 
 | 2501 | 		sctx->stat.malloc_errors++; | 
 | 2502 | 		spin_unlock(&sctx->stat_lock); | 
| Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 2503 | 		return -ENOMEM; | 
 | 2504 | 	} | 
 | 2505 |  | 
| Stefan Behrens | 7a9e998 | 2012-11-02 14:58:04 +0100 | [diff] [blame] | 2506 | 	/* one ref inside this function, plus one for each page added to | 
 | 2507 | 	 * a bio later on */ | 
| Elena Reshetova | 186debd | 2017-03-03 10:55:23 +0200 | [diff] [blame] | 2508 | 	refcount_set(&sblock->refs, 1); | 
| Stefan Behrens | d9d181c | 2012-11-02 09:58:09 +0100 | [diff] [blame] | 2509 | 	sblock->sctx = sctx; | 
| Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 2510 | 	sblock->no_io_error_seen = 1; | 
 | 2511 |  | 
 | 2512 | 	for (index = 0; len > 0; index++) { | 
| Stefan Behrens | 7a9e998 | 2012-11-02 14:58:04 +0100 | [diff] [blame] | 2513 | 		struct scrub_page *spage; | 
| Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 2514 | 		u64 l = min_t(u64, len, PAGE_SIZE); | 
 | 2515 |  | 
| David Sterba | 58c4e17 | 2016-02-11 10:49:42 +0100 | [diff] [blame] | 2516 | 		spage = kzalloc(sizeof(*spage), GFP_KERNEL); | 
| Stefan Behrens | 7a9e998 | 2012-11-02 14:58:04 +0100 | [diff] [blame] | 2517 | 		if (!spage) { | 
 | 2518 | leave_nomem: | 
| Stefan Behrens | d9d181c | 2012-11-02 09:58:09 +0100 | [diff] [blame] | 2519 | 			spin_lock(&sctx->stat_lock); | 
 | 2520 | 			sctx->stat.malloc_errors++; | 
 | 2521 | 			spin_unlock(&sctx->stat_lock); | 
| Stefan Behrens | 7a9e998 | 2012-11-02 14:58:04 +0100 | [diff] [blame] | 2522 | 			scrub_block_put(sblock); | 
| Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 2523 | 			return -ENOMEM; | 
 | 2524 | 		} | 
| Stefan Behrens | 7a9e998 | 2012-11-02 14:58:04 +0100 | [diff] [blame] | 2525 | 		BUG_ON(index >= SCRUB_MAX_PAGES_PER_BLOCK); | 
 | 2526 | 		scrub_page_get(spage); | 
 | 2527 | 		sblock->pagev[index] = spage; | 
| Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 2528 | 		spage->sblock = sblock; | 
| Stefan Behrens | a36cf8b | 2012-11-02 13:26:57 +0100 | [diff] [blame] | 2529 | 		spage->dev = dev; | 
| Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 2530 | 		spage->flags = flags; | 
 | 2531 | 		spage->generation = gen; | 
 | 2532 | 		spage->logical = logical; | 
 | 2533 | 		spage->physical = physical; | 
| Stefan Behrens | ff023aa | 2012-11-06 11:43:11 +0100 | [diff] [blame] | 2534 | 		spage->physical_for_dev_replace = physical_for_dev_replace; | 
| Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 2535 | 		spage->mirror_num = mirror_num; | 
 | 2536 | 		if (csum) { | 
 | 2537 | 			spage->have_csum = 1; | 
| Stefan Behrens | d9d181c | 2012-11-02 09:58:09 +0100 | [diff] [blame] | 2538 | 			memcpy(spage->csum, csum, sctx->csum_size); | 
| Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 2539 | 		} else { | 
 | 2540 | 			spage->have_csum = 0; | 
 | 2541 | 		} | 
 | 2542 | 		sblock->page_count++; | 
| David Sterba | 58c4e17 | 2016-02-11 10:49:42 +0100 | [diff] [blame] | 2543 | 		spage->page = alloc_page(GFP_KERNEL); | 
| Stefan Behrens | 7a9e998 | 2012-11-02 14:58:04 +0100 | [diff] [blame] | 2544 | 		if (!spage->page) | 
 | 2545 | 			goto leave_nomem; | 
| Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 2546 | 		len -= l; | 
 | 2547 | 		logical += l; | 
 | 2548 | 		physical += l; | 
| Stefan Behrens | ff023aa | 2012-11-06 11:43:11 +0100 | [diff] [blame] | 2549 | 		physical_for_dev_replace += l; | 
| Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 2550 | 	} | 
 | 2551 |  | 
| Stefan Behrens | 7a9e998 | 2012-11-02 14:58:04 +0100 | [diff] [blame] | 2552 | 	WARN_ON(sblock->page_count == 0); | 
| Omar Sandoval | 73ff61d | 2015-06-19 11:52:51 -0700 | [diff] [blame] | 2553 | 	if (dev->missing) { | 
 | 2554 | 		/* | 
 | 2555 | 		 * This case should only be hit for RAID 5/6 device replace. See | 
 | 2556 | 		 * the comment in scrub_missing_raid56_pages() for details. | 
 | 2557 | 		 */ | 
 | 2558 | 		scrub_missing_raid56_pages(sblock); | 
 | 2559 | 	} else { | 
 | 2560 | 		for (index = 0; index < sblock->page_count; index++) { | 
 | 2561 | 			struct scrub_page *spage = sblock->pagev[index]; | 
 | 2562 | 			int ret; | 
| Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 2563 |  | 
| Omar Sandoval | 73ff61d | 2015-06-19 11:52:51 -0700 | [diff] [blame] | 2564 | 			ret = scrub_add_page_to_rd_bio(sctx, spage); | 
 | 2565 | 			if (ret) { | 
 | 2566 | 				scrub_block_put(sblock); | 
 | 2567 | 				return ret; | 
 | 2568 | 			} | 
| Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 2569 | 		} | 
| Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 2570 |  | 
| Omar Sandoval | 73ff61d | 2015-06-19 11:52:51 -0700 | [diff] [blame] | 2571 | 		if (force) | 
 | 2572 | 			scrub_submit(sctx); | 
 | 2573 | 	} | 
| Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 2574 |  | 
 | 2575 | 	/* the last reference frees the block, either here or in the bio completion for the last page */ | 
 | 2576 | 	scrub_block_put(sblock); | 
 | 2577 | 	return 0; | 
 | 2578 | } | 
 | 2579 |  | 
| Christoph Hellwig | 4246a0b | 2015-07-20 15:29:37 +0200 | [diff] [blame] | 2580 | static void scrub_bio_end_io(struct bio *bio) | 
| Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 2581 | { | 
 | 2582 | 	struct scrub_bio *sbio = bio->bi_private; | 
| Jeff Mahoney | fb45625 | 2016-06-22 18:54:56 -0400 | [diff] [blame] | 2583 | 	struct btrfs_fs_info *fs_info = sbio->dev->fs_info; | 
| Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 2584 |  | 
| Christoph Hellwig | 4246a0b | 2015-07-20 15:29:37 +0200 | [diff] [blame] | 2585 | 	sbio->err = bio->bi_error; | 
| Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 2586 | 	sbio->bio = bio; | 
 | 2587 |  | 
| Qu Wenruo | 0339ef2 | 2014-02-28 10:46:17 +0800 | [diff] [blame] | 2588 | 	btrfs_queue_work(fs_info->scrub_workers, &sbio->work); | 
| Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 2589 | } | 
 | 2590 |  | 
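/*
 * Completion work for a read bio: propagate I/O errors to the affected pages,
 * finish the scrub_blocks whose last outstanding page completed, recycle the
 * scrub_bio slot and, in dev-replace mode, flush pending target writes when
 * requested.
 */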
 | 2591 | static void scrub_bio_end_io_worker(struct btrfs_work *work) | 
 | 2592 | { | 
 | 2593 | 	struct scrub_bio *sbio = container_of(work, struct scrub_bio, work); | 
| Stefan Behrens | d9d181c | 2012-11-02 09:58:09 +0100 | [diff] [blame] | 2594 | 	struct scrub_ctx *sctx = sbio->sctx; | 
| Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 2595 | 	int i; | 
 | 2596 |  | 
| Stefan Behrens | ff023aa | 2012-11-06 11:43:11 +0100 | [diff] [blame] | 2597 | 	BUG_ON(sbio->page_count > SCRUB_PAGES_PER_RD_BIO); | 
| Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 2598 | 	if (sbio->err) { | 
 | 2599 | 		for (i = 0; i < sbio->page_count; i++) { | 
 | 2600 | 			struct scrub_page *spage = sbio->pagev[i]; | 
 | 2601 |  | 
 | 2602 | 			spage->io_error = 1; | 
 | 2603 | 			spage->sblock->no_io_error_seen = 0; | 
 | 2604 | 		} | 
 | 2605 | 	} | 
 | 2606 |  | 
 | 2607 | 	/* now complete the scrub_block items that have all pages completed */ | 
 | 2608 | 	for (i = 0; i < sbio->page_count; i++) { | 
 | 2609 | 		struct scrub_page *spage = sbio->pagev[i]; | 
 | 2610 | 		struct scrub_block *sblock = spage->sblock; | 
 | 2611 |  | 
 | 2612 | 		if (atomic_dec_and_test(&sblock->outstanding_pages)) | 
 | 2613 | 			scrub_block_complete(sblock); | 
 | 2614 | 		scrub_block_put(sblock); | 
 | 2615 | 	} | 
 | 2616 |  | 
| Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 2617 | 	bio_put(sbio->bio); | 
 | 2618 | 	sbio->bio = NULL; | 
| Stefan Behrens | d9d181c | 2012-11-02 09:58:09 +0100 | [diff] [blame] | 2619 | 	spin_lock(&sctx->list_lock); | 
 | 2620 | 	sbio->next_free = sctx->first_free; | 
 | 2621 | 	sctx->first_free = sbio->index; | 
 | 2622 | 	spin_unlock(&sctx->list_lock); | 
| Stefan Behrens | ff023aa | 2012-11-06 11:43:11 +0100 | [diff] [blame] | 2623 |  | 
 | 2624 | 	if (sctx->is_dev_replace && | 
 | 2625 | 	    atomic_read(&sctx->wr_ctx.flush_all_writes)) { | 
 | 2626 | 		mutex_lock(&sctx->wr_ctx.wr_lock); | 
 | 2627 | 		scrub_wr_submit(sctx); | 
 | 2628 | 		mutex_unlock(&sctx->wr_ctx.wr_lock); | 
 | 2629 | 	} | 
 | 2630 |  | 
| Stefan Behrens | b6bfebc | 2012-11-02 16:44:58 +0100 | [diff] [blame] | 2631 | 	scrub_pending_bio_dec(sctx); | 
| Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 2632 | } | 
 | 2633 |  | 
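/*
 * Mark the sectors of [start, start + len) in a per-stripe bitmap. The start
 * offset is taken modulo the stripe length, so a range that wraps around the
 * end of the stripe sets bits at both the tail and the head of the bitmap;
 * ranges of at least a full stripe set every bit.
 */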
| Miao Xie | 5a6ac9e | 2014-11-06 17:20:58 +0800 | [diff] [blame] | 2634 | static inline void __scrub_mark_bitmap(struct scrub_parity *sparity, | 
 | 2635 | 				       unsigned long *bitmap, | 
 | 2636 | 				       u64 start, u64 len) | 
 | 2637 | { | 
| Liu Bo | 972d721 | 2017-04-03 13:45:33 -0700 | [diff] [blame] | 2638 | 	u64 offset; | 
| Miao Xie | 5a6ac9e | 2014-11-06 17:20:58 +0800 | [diff] [blame] | 2639 | 	int nsectors; | 
| Jeff Mahoney | da17066 | 2016-06-15 09:22:56 -0400 | [diff] [blame] | 2640 | 	int sectorsize = sparity->sctx->fs_info->sectorsize; | 
| Miao Xie | 5a6ac9e | 2014-11-06 17:20:58 +0800 | [diff] [blame] | 2641 |  | 
 | 2642 | 	if (len >= sparity->stripe_len) { | 
 | 2643 | 		bitmap_set(bitmap, 0, sparity->nsectors); | 
 | 2644 | 		return; | 
 | 2645 | 	} | 
 | 2646 |  | 
 | 2647 | 	start -= sparity->logic_start; | 
| Liu Bo | 972d721 | 2017-04-03 13:45:33 -0700 | [diff] [blame] | 2648 | 	start = div64_u64_rem(start, sparity->stripe_len, &offset); | 
 | 2649 | 	offset = div_u64(offset, sectorsize); | 
| Miao Xie | 5a6ac9e | 2014-11-06 17:20:58 +0800 | [diff] [blame] | 2650 | 	nsectors = (int)len / sectorsize; | 
 | 2651 |  | 
 | 2652 | 	if (offset + nsectors <= sparity->nsectors) { | 
 | 2653 | 		bitmap_set(bitmap, offset, nsectors); | 
 | 2654 | 		return; | 
 | 2655 | 	} | 
 | 2656 |  | 
 | 2657 | 	bitmap_set(bitmap, offset, sparity->nsectors - offset); | 
 | 2658 | 	bitmap_set(bitmap, 0, nsectors - (sparity->nsectors - offset)); | 
 | 2659 | } | 
 | 2660 |  | 
 | 2661 | static inline void scrub_parity_mark_sectors_error(struct scrub_parity *sparity, | 
 | 2662 | 						   u64 start, u64 len) | 
 | 2663 | { | 
 | 2664 | 	__scrub_mark_bitmap(sparity, sparity->ebitmap, start, len); | 
 | 2665 | } | 
 | 2666 |  | 
 | 2667 | static inline void scrub_parity_mark_sectors_data(struct scrub_parity *sparity, | 
 | 2668 | 						  u64 start, u64 len) | 
 | 2669 | { | 
 | 2670 | 	__scrub_mark_bitmap(sparity, sparity->dbitmap, start, len); | 
 | 2671 | } | 
 | 2672 |  | 
| Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 2673 | static void scrub_block_complete(struct scrub_block *sblock) | 
 | 2674 | { | 
| Miao Xie | 5a6ac9e | 2014-11-06 17:20:58 +0800 | [diff] [blame] | 2675 | 	int corrupted = 0; | 
 | 2676 |  | 
| Stefan Behrens | ff023aa | 2012-11-06 11:43:11 +0100 | [diff] [blame] | 2677 | 	if (!sblock->no_io_error_seen) { | 
| Miao Xie | 5a6ac9e | 2014-11-06 17:20:58 +0800 | [diff] [blame] | 2678 | 		corrupted = 1; | 
| Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 2679 | 		scrub_handle_errored_block(sblock); | 
| Stefan Behrens | ff023aa | 2012-11-06 11:43:11 +0100 | [diff] [blame] | 2680 | 	} else { | 
 | 2681 | 		/* | 
 | 2682 | 		 * In the dev-replace case, a block with a checksum error | 
 | 2683 | 		 * is written via the repair mechanism; otherwise it is | 
 | 2684 | 		 * written to the replace target here. | 
 | 2685 | 		 */ | 
| Miao Xie | 5a6ac9e | 2014-11-06 17:20:58 +0800 | [diff] [blame] | 2686 | 		corrupted = scrub_checksum(sblock); | 
 | 2687 | 		if (!corrupted && sblock->sctx->is_dev_replace) | 
| Stefan Behrens | ff023aa | 2012-11-06 11:43:11 +0100 | [diff] [blame] | 2688 | 			scrub_write_block_to_dev_replace(sblock); | 
 | 2689 | 	} | 
| Miao Xie | 5a6ac9e | 2014-11-06 17:20:58 +0800 | [diff] [blame] | 2690 |  | 
 | 2691 | 	if (sblock->sparity && corrupted && !sblock->data_corrected) { | 
 | 2692 | 		u64 start = sblock->pagev[0]->logical; | 
 | 2693 | 		u64 end = sblock->pagev[sblock->page_count - 1]->logical + | 
 | 2694 | 			  PAGE_SIZE; | 
 | 2695 |  | 
 | 2696 | 		scrub_parity_mark_sectors_error(sblock->sparity, | 
 | 2697 | 						start, end - start); | 
 | 2698 | 	} | 
| Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 2699 | } | 
 | 2700 |  | 
| Zhao Lei | 3b5753e | 2015-08-24 22:03:02 +0800 | [diff] [blame] | 2701 | static int scrub_find_csum(struct scrub_ctx *sctx, u64 logical, u8 *csum) | 
| Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 2702 | { | 
 | 2703 | 	struct btrfs_ordered_sum *sum = NULL; | 
| Miao Xie | f51a4a1 | 2013-06-19 10:36:09 +0800 | [diff] [blame] | 2704 | 	unsigned long index; | 
| Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 2705 | 	unsigned long num_sectors; | 
| Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 2706 |  | 
| Stefan Behrens | d9d181c | 2012-11-02 09:58:09 +0100 | [diff] [blame] | 2707 | 	while (!list_empty(&sctx->csum_list)) { | 
 | 2708 | 		sum = list_first_entry(&sctx->csum_list, | 
| Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 2709 | 				       struct btrfs_ordered_sum, list); | 
 | 2710 | 		if (sum->bytenr > logical) | 
 | 2711 | 			return 0; | 
 | 2712 | 		if (sum->bytenr + sum->len > logical) | 
 | 2713 | 			break; | 
 | 2714 |  | 
| Stefan Behrens | d9d181c | 2012-11-02 09:58:09 +0100 | [diff] [blame] | 2715 | 		++sctx->stat.csum_discards; | 
| Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 2716 | 		list_del(&sum->list); | 
 | 2717 | 		kfree(sum); | 
 | 2718 | 		sum = NULL; | 
 | 2719 | 	} | 
 | 2720 | 	if (!sum) | 
 | 2721 | 		return 0; | 
 | 2722 |  | 
| David Sterba | 25cc122 | 2017-05-16 19:10:41 +0200 | [diff] [blame^] | 2723 | 	index = ((u32)(logical - sum->bytenr)) / sctx->fs_info->sectorsize; | 
 | 2724 | 	num_sectors = sum->len / sctx->fs_info->sectorsize; | 
| Miao Xie | f51a4a1 | 2013-06-19 10:36:09 +0800 | [diff] [blame] | 2725 | 	memcpy(csum, sum->sums + index, sctx->csum_size); | 
 | 2726 | 	if (index == num_sectors - 1) { | 
| Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 2727 | 		list_del(&sum->list); | 
 | 2728 | 		kfree(sum); | 
 | 2729 | 	} | 
| Miao Xie | f51a4a1 | 2013-06-19 10:36:09 +0800 | [diff] [blame] | 2730 | 	return 1; | 
| Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 2731 | } | 
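/*
 * Note on the copy above (an observation, not authoritative): index is
 * a sector count while sum->sums is declared as an array of u32, so
 * "sum->sums + index" advances index * 4 bytes.  That lines up with
 * csum_size only because crc32c checksums are exactly sizeof(u32).
 * Worked example with 4K sectors: for logical = sum->bytenr + 12K,
 * index = 3 and the memcpy() reads the fourth 4-byte checksum.
 */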
 | 2732 |  | 
 | 2733 | /* scrub_extent() tries to collect up to 64 kB for each bio */ | 
| Stefan Behrens | d9d181c | 2012-11-02 09:58:09 +0100 | [diff] [blame] | 2734 | static int scrub_extent(struct scrub_ctx *sctx, u64 logical, u64 len, | 
| Stefan Behrens | a36cf8b | 2012-11-02 13:26:57 +0100 | [diff] [blame] | 2735 | 			u64 physical, struct btrfs_device *dev, u64 flags, | 
| Stefan Behrens | ff023aa | 2012-11-06 11:43:11 +0100 | [diff] [blame] | 2736 | 			u64 gen, int mirror_num, u64 physical_for_dev_replace) | 
| Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 2737 | { | 
 | 2738 | 	int ret; | 
 | 2739 | 	u8 csum[BTRFS_CSUM_SIZE]; | 
| Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 2740 | 	u32 blocksize; | 
 | 2741 |  | 
 | 2742 | 	if (flags & BTRFS_EXTENT_FLAG_DATA) { | 
| David Sterba | 25cc122 | 2017-05-16 19:10:41 +0200 | [diff] [blame^] | 2743 | 		blocksize = sctx->fs_info->sectorsize; | 
| Stefan Behrens | d9d181c | 2012-11-02 09:58:09 +0100 | [diff] [blame] | 2744 | 		spin_lock(&sctx->stat_lock); | 
 | 2745 | 		sctx->stat.data_extents_scrubbed++; | 
 | 2746 | 		sctx->stat.data_bytes_scrubbed += len; | 
 | 2747 | 		spin_unlock(&sctx->stat_lock); | 
| Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 2748 | 	} else if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) { | 
| David Sterba | 25cc122 | 2017-05-16 19:10:41 +0200 | [diff] [blame^] | 2749 | 		blocksize = sctx->fs_info->nodesize; | 
| Stefan Behrens | d9d181c | 2012-11-02 09:58:09 +0100 | [diff] [blame] | 2750 | 		spin_lock(&sctx->stat_lock); | 
 | 2751 | 		sctx->stat.tree_extents_scrubbed++; | 
 | 2752 | 		sctx->stat.tree_bytes_scrubbed += len; | 
 | 2753 | 		spin_unlock(&sctx->stat_lock); | 
| Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 2754 | 	} else { | 
| David Sterba | 25cc122 | 2017-05-16 19:10:41 +0200 | [diff] [blame^] | 2755 | 		blocksize = sctx->fs_info->sectorsize; | 
| Stefan Behrens | ff023aa | 2012-11-06 11:43:11 +0100 | [diff] [blame] | 2756 | 		WARN_ON(1); | 
| Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 2757 | 	} | 
| Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 2758 |  | 
 | 2759 | 	while (len) { | 
| Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 2760 | 		u64 l = min_t(u64, len, blocksize); | 
| Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 2761 | 		int have_csum = 0; | 
 | 2762 |  | 
 | 2763 | 		if (flags & BTRFS_EXTENT_FLAG_DATA) { | 
 | 2764 | 			/* push csums to sbio */ | 
| Zhao Lei | 3b5753e | 2015-08-24 22:03:02 +0800 | [diff] [blame] | 2765 | 			have_csum = scrub_find_csum(sctx, logical, csum); | 
| Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 2766 | 			if (have_csum == 0) | 
| Stefan Behrens | d9d181c | 2012-11-02 09:58:09 +0100 | [diff] [blame] | 2767 | 				++sctx->stat.no_csum; | 
| Stefan Behrens | ff023aa | 2012-11-06 11:43:11 +0100 | [diff] [blame] | 2768 | 			if (sctx->is_dev_replace && !have_csum) { | 
 | 2769 | 				ret = copy_nocow_pages(sctx, logical, l, | 
 | 2770 | 						       mirror_num, | 
 | 2771 | 						      physical_for_dev_replace); | 
 | 2772 | 				goto behind_scrub_pages; | 
 | 2773 | 			} | 
| Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 2774 | 		} | 
| Stefan Behrens | a36cf8b | 2012-11-02 13:26:57 +0100 | [diff] [blame] | 2775 | 		ret = scrub_pages(sctx, logical, l, physical, dev, flags, gen, | 
| Stefan Behrens | ff023aa | 2012-11-06 11:43:11 +0100 | [diff] [blame] | 2776 | 				  mirror_num, have_csum ? csum : NULL, 0, | 
 | 2777 | 				  physical_for_dev_replace); | 
 | 2778 | behind_scrub_pages: | 
| Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 2779 | 		if (ret) | 
 | 2780 | 			return ret; | 
 | 2781 | 		len -= l; | 
 | 2782 | 		logical += l; | 
 | 2783 | 		physical += l; | 
| Stefan Behrens | ff023aa | 2012-11-06 11:43:11 +0100 | [diff] [blame] | 2784 | 		physical_for_dev_replace += l; | 
| Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 2785 | 	} | 
 | 2786 | 	return 0; | 
 | 2787 | } | 
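/*
 * Worked example (assuming 4K sectorsize): a data extent with
 * len = 20K is split into five scrub_pages() calls of l = 4K each,
 * since blocksize = sectorsize for data, and scrub_find_csum() is
 * repeated per block so every scrub_page carries its own checksum.
 * A tree block uses blocksize = nodesize instead and typically goes
 * out in a single call.
 */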
 | 2788 |  | 
| Miao Xie | 5a6ac9e | 2014-11-06 17:20:58 +0800 | [diff] [blame] | 2789 | static int scrub_pages_for_parity(struct scrub_parity *sparity, | 
 | 2790 | 				  u64 logical, u64 len, | 
 | 2791 | 				  u64 physical, struct btrfs_device *dev, | 
 | 2792 | 				  u64 flags, u64 gen, int mirror_num, u8 *csum) | 
 | 2793 | { | 
 | 2794 | 	struct scrub_ctx *sctx = sparity->sctx; | 
 | 2795 | 	struct scrub_block *sblock; | 
 | 2796 | 	int index; | 
 | 2797 |  | 
| David Sterba | 58c4e17 | 2016-02-11 10:49:42 +0100 | [diff] [blame] | 2798 | 	sblock = kzalloc(sizeof(*sblock), GFP_KERNEL); | 
| Miao Xie | 5a6ac9e | 2014-11-06 17:20:58 +0800 | [diff] [blame] | 2799 | 	if (!sblock) { | 
 | 2800 | 		spin_lock(&sctx->stat_lock); | 
 | 2801 | 		sctx->stat.malloc_errors++; | 
 | 2802 | 		spin_unlock(&sctx->stat_lock); | 
 | 2803 | 		return -ENOMEM; | 
 | 2804 | 	} | 
 | 2805 |  | 
 | 2806 | 	/* one ref inside this function, plus one for each page added to | 
 | 2807 | 	 * a bio later on */ | 
| Elena Reshetova | 186debd | 2017-03-03 10:55:23 +0200 | [diff] [blame] | 2808 | 	refcount_set(&sblock->refs, 1); | 
| Miao Xie | 5a6ac9e | 2014-11-06 17:20:58 +0800 | [diff] [blame] | 2809 | 	sblock->sctx = sctx; | 
 | 2810 | 	sblock->no_io_error_seen = 1; | 
 | 2811 | 	sblock->sparity = sparity; | 
 | 2812 | 	scrub_parity_get(sparity); | 
 | 2813 |  | 
 | 2814 | 	for (index = 0; len > 0; index++) { | 
 | 2815 | 		struct scrub_page *spage; | 
 | 2816 | 		u64 l = min_t(u64, len, PAGE_SIZE); | 
 | 2817 |  | 
| David Sterba | 58c4e17 | 2016-02-11 10:49:42 +0100 | [diff] [blame] | 2818 | 		spage = kzalloc(sizeof(*spage), GFP_KERNEL); | 
| Miao Xie | 5a6ac9e | 2014-11-06 17:20:58 +0800 | [diff] [blame] | 2819 | 		if (!spage) { | 
 | 2820 | leave_nomem: | 
 | 2821 | 			spin_lock(&sctx->stat_lock); | 
 | 2822 | 			sctx->stat.malloc_errors++; | 
 | 2823 | 			spin_unlock(&sctx->stat_lock); | 
 | 2824 | 			scrub_block_put(sblock); | 
 | 2825 | 			return -ENOMEM; | 
 | 2826 | 		} | 
 | 2827 | 		BUG_ON(index >= SCRUB_MAX_PAGES_PER_BLOCK); | 
 | 2828 | 		/* For scrub block */ | 
 | 2829 | 		scrub_page_get(spage); | 
 | 2830 | 		sblock->pagev[index] = spage; | 
 | 2831 | 		/* For scrub parity */ | 
 | 2832 | 		scrub_page_get(spage); | 
 | 2833 | 		list_add_tail(&spage->list, &sparity->spages); | 
 | 2834 | 		spage->sblock = sblock; | 
 | 2835 | 		spage->dev = dev; | 
 | 2836 | 		spage->flags = flags; | 
 | 2837 | 		spage->generation = gen; | 
 | 2838 | 		spage->logical = logical; | 
 | 2839 | 		spage->physical = physical; | 
 | 2840 | 		spage->mirror_num = mirror_num; | 
 | 2841 | 		if (csum) { | 
 | 2842 | 			spage->have_csum = 1; | 
 | 2843 | 			memcpy(spage->csum, csum, sctx->csum_size); | 
 | 2844 | 		} else { | 
 | 2845 | 			spage->have_csum = 0; | 
 | 2846 | 		} | 
 | 2847 | 		sblock->page_count++; | 
| David Sterba | 58c4e17 | 2016-02-11 10:49:42 +0100 | [diff] [blame] | 2848 | 		spage->page = alloc_page(GFP_KERNEL); | 
| Miao Xie | 5a6ac9e | 2014-11-06 17:20:58 +0800 | [diff] [blame] | 2849 | 		if (!spage->page) | 
 | 2850 | 			goto leave_nomem; | 
 | 2851 | 		len -= l; | 
 | 2852 | 		logical += l; | 
 | 2853 | 		physical += l; | 
 | 2854 | 	} | 
 | 2855 |  | 
 | 2856 | 	WARN_ON(sblock->page_count == 0); | 
 | 2857 | 	for (index = 0; index < sblock->page_count; index++) { | 
 | 2858 | 		struct scrub_page *spage = sblock->pagev[index]; | 
 | 2859 | 		int ret; | 
 | 2860 |  | 
 | 2861 | 		ret = scrub_add_page_to_rd_bio(sctx, spage); | 
 | 2862 | 		if (ret) { | 
 | 2863 | 			scrub_block_put(sblock); | 
 | 2864 | 			return ret; | 
 | 2865 | 		} | 
 | 2866 | 	} | 
 | 2867 |  | 
 | 2868 | 	/* the last ref frees, either here or in the bio completion for the last page */ | 
 | 2869 | 	scrub_block_put(sblock); | 
 | 2870 | 	return 0; | 
 | 2871 | } | 
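/*
 * Note on the reference counting above (a summary, not authoritative):
 * each scrub_page is pinned twice, once for sblock->pagev[] and once
 * for sparity->spages, so it survives until both the block and the
 * parity bookkeeping drop it.  The scrub_block starts with one local
 * ref (released at the end of the function) and gains one per page
 * handed to a read bio, so the last completing bio drops the final
 * reference.
 */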
 | 2872 |  | 
 | 2873 | static int scrub_extent_for_parity(struct scrub_parity *sparity, | 
 | 2874 | 				   u64 logical, u64 len, | 
 | 2875 | 				   u64 physical, struct btrfs_device *dev, | 
 | 2876 | 				   u64 flags, u64 gen, int mirror_num) | 
 | 2877 | { | 
 | 2878 | 	struct scrub_ctx *sctx = sparity->sctx; | 
 | 2879 | 	int ret; | 
 | 2880 | 	u8 csum[BTRFS_CSUM_SIZE]; | 
 | 2881 | 	u32 blocksize; | 
 | 2882 |  | 
| Omar Sandoval | 4a77089 | 2015-06-19 11:52:52 -0700 | [diff] [blame] | 2883 | 	if (dev->missing) { | 
 | 2884 | 		scrub_parity_mark_sectors_error(sparity, logical, len); | 
 | 2885 | 		return 0; | 
 | 2886 | 	} | 
 | 2887 |  | 
| Miao Xie | 5a6ac9e | 2014-11-06 17:20:58 +0800 | [diff] [blame] | 2888 | 	if (flags & BTRFS_EXTENT_FLAG_DATA) { | 
| David Sterba | 25cc122 | 2017-05-16 19:10:41 +0200 | [diff] [blame^] | 2889 | 		blocksize = sctx->fs_info->sectorsize; | 
| Miao Xie | 5a6ac9e | 2014-11-06 17:20:58 +0800 | [diff] [blame] | 2890 | 	} else if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) { | 
| David Sterba | 25cc122 | 2017-05-16 19:10:41 +0200 | [diff] [blame^] | 2891 | 		blocksize = sctx->fs_info->nodesize; | 
| Miao Xie | 5a6ac9e | 2014-11-06 17:20:58 +0800 | [diff] [blame] | 2892 | 	} else { | 
| David Sterba | 25cc122 | 2017-05-16 19:10:41 +0200 | [diff] [blame^] | 2893 | 		blocksize = sctx->fs_info->sectorsize; | 
| Miao Xie | 5a6ac9e | 2014-11-06 17:20:58 +0800 | [diff] [blame] | 2894 | 		WARN_ON(1); | 
 | 2895 | 	} | 
 | 2896 |  | 
 | 2897 | 	while (len) { | 
 | 2898 | 		u64 l = min_t(u64, len, blocksize); | 
 | 2899 | 		int have_csum = 0; | 
 | 2900 |  | 
 | 2901 | 		if (flags & BTRFS_EXTENT_FLAG_DATA) { | 
 | 2902 | 			/* push csums to sbio */ | 
| Zhao Lei | 3b5753e | 2015-08-24 22:03:02 +0800 | [diff] [blame] | 2903 | 			have_csum = scrub_find_csum(sctx, logical, csum); | 
| Miao Xie | 5a6ac9e | 2014-11-06 17:20:58 +0800 | [diff] [blame] | 2904 | 			if (have_csum == 0) | 
 | 2905 | 				goto skip; | 
 | 2906 | 		} | 
 | 2907 | 		ret = scrub_pages_for_parity(sparity, logical, l, physical, dev, | 
 | 2908 | 					     flags, gen, mirror_num, | 
 | 2909 | 					     have_csum ? csum : NULL); | 
| Miao Xie | 5a6ac9e | 2014-11-06 17:20:58 +0800 | [diff] [blame] | 2910 | 		if (ret) | 
 | 2911 | 			return ret; | 
| Dan Carpenter | 6b6d24b | 2014-12-12 22:30:00 +0300 | [diff] [blame] | 2912 | skip: | 
| Miao Xie | 5a6ac9e | 2014-11-06 17:20:58 +0800 | [diff] [blame] | 2913 | 		len -= l; | 
 | 2914 | 		logical += l; | 
 | 2915 | 		physical += l; | 
 | 2916 | 	} | 
 | 2917 | 	return 0; | 
 | 2918 | } | 
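/*
 * Note (an observation, not authoritative): unlike scrub_extent(),
 * which still reads nodatasum data and copies it in the dev-replace
 * case, a data block without a checksum is simply skipped here via
 * the "skip" label and left to the parity pass.
 */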
 | 2919 |  | 
| Wang Shilong | 3b080b2 | 2014-04-01 18:01:43 +0800 | [diff] [blame] | 2920 | /* | 
 | 2921 |  * Given a physical address, this will calculate its | 
 | 2922 |  * logical offset. If this is a parity stripe, it will return | 
 | 2923 |  * the leftmost data stripe's logical offset. | 
 | 2924 |  * | 
 | 2925 |  * Returns 0 if it is a data stripe, 1 if it is a parity stripe. | 
 | 2926 |  */ | 
 | 2927 | static int get_raid56_logic_offset(u64 physical, int num, | 
| Miao Xie | 5a6ac9e | 2014-11-06 17:20:58 +0800 | [diff] [blame] | 2928 | 				   struct map_lookup *map, u64 *offset, | 
 | 2929 | 				   u64 *stripe_start) | 
| Wang Shilong | 3b080b2 | 2014-04-01 18:01:43 +0800 | [diff] [blame] | 2930 | { | 
 | 2931 | 	int i; | 
 | 2932 | 	int j = 0; | 
 | 2933 | 	u64 stripe_nr; | 
 | 2934 | 	u64 last_offset; | 
| David Sterba | 9d644a6 | 2015-02-20 18:42:11 +0100 | [diff] [blame] | 2935 | 	u32 stripe_index; | 
 | 2936 | 	u32 rot; | 
| Wang Shilong | 3b080b2 | 2014-04-01 18:01:43 +0800 | [diff] [blame] | 2937 |  | 
 | 2938 | 	last_offset = (physical - map->stripes[num].physical) * | 
 | 2939 | 		      nr_data_stripes(map); | 
| Miao Xie | 5a6ac9e | 2014-11-06 17:20:58 +0800 | [diff] [blame] | 2940 | 	if (stripe_start) | 
 | 2941 | 		*stripe_start = last_offset; | 
 | 2942 |  | 
| Wang Shilong | 3b080b2 | 2014-04-01 18:01:43 +0800 | [diff] [blame] | 2943 | 	*offset = last_offset; | 
 | 2944 | 	for (i = 0; i < nr_data_stripes(map); i++) { | 
 | 2945 | 		*offset = last_offset + i * map->stripe_len; | 
 | 2946 |  | 
| Liu Bo | 42c61ab | 2017-04-03 13:45:24 -0700 | [diff] [blame] | 2947 | 		stripe_nr = div64_u64(*offset, map->stripe_len); | 
| David Sterba | b8b93ad | 2015-01-16 17:26:13 +0100 | [diff] [blame] | 2948 | 		stripe_nr = div_u64(stripe_nr, nr_data_stripes(map)); | 
| Wang Shilong | 3b080b2 | 2014-04-01 18:01:43 +0800 | [diff] [blame] | 2949 |  | 
 | 2950 | 		/* Work out the disk rotation on this stripe-set */ | 
| David Sterba | 47c5713 | 2015-02-20 18:43:47 +0100 | [diff] [blame] | 2951 | 		stripe_nr = div_u64_rem(stripe_nr, map->num_stripes, &rot); | 
| Wang Shilong | 3b080b2 | 2014-04-01 18:01:43 +0800 | [diff] [blame] | 2952 | 		/* calculate which stripe this data is located on */ | 
 | 2953 | 		rot += i; | 
| Wang Shilong | e4fbaee | 2014-04-11 18:32:25 +0800 | [diff] [blame] | 2954 | 		stripe_index = rot % map->num_stripes; | 
| Wang Shilong | 3b080b2 | 2014-04-01 18:01:43 +0800 | [diff] [blame] | 2955 | 		if (stripe_index == num) | 
 | 2956 | 			return 0; | 
 | 2957 | 		if (stripe_index < num) | 
 | 2958 | 			j++; | 
 | 2959 | 	} | 
 | 2960 | 	*offset = last_offset + j * map->stripe_len; | 
 | 2961 | 	return 1; | 
 | 2962 | } | 
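/*
 * Worked example (assuming RAID5 over three devices with
 * nr_data_stripes = 2 and stripe_len = 64K; the loop above implies
 * that data stripe i of full stripe f lives on device
 * (f % num_stripes + i) % num_stripes):
 *
 *   physical offset 0 on device 0: i = 0 gives stripe_index 0, a
 *   data stripe, so *offset = 0 and the function returns 0;
 *
 *   physical offset 0 on device 1: i = 1 gives stripe_index 1, a
 *   data stripe, so *offset = 64K and the function returns 0;
 *
 *   physical offset 0 on device 2: no i matches, so this is parity;
 *   *stripe_start = 0 (the leftmost data stripe of the full stripe)
 *   and the function returns 1.
 */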
 | 2963 |  | 
| Miao Xie | 5a6ac9e | 2014-11-06 17:20:58 +0800 | [diff] [blame] | 2964 | static void scrub_free_parity(struct scrub_parity *sparity) | 
 | 2965 | { | 
 | 2966 | 	struct scrub_ctx *sctx = sparity->sctx; | 
 | 2967 | 	struct scrub_page *curr, *next; | 
 | 2968 | 	int nbits; | 
 | 2969 |  | 
 | 2970 | 	nbits = bitmap_weight(sparity->ebitmap, sparity->nsectors); | 
 | 2971 | 	if (nbits) { | 
 | 2972 | 		spin_lock(&sctx->stat_lock); | 
 | 2973 | 		sctx->stat.read_errors += nbits; | 
 | 2974 | 		sctx->stat.uncorrectable_errors += nbits; | 
 | 2975 | 		spin_unlock(&sctx->stat_lock); | 
 | 2976 | 	} | 
 | 2977 |  | 
 | 2978 | 	list_for_each_entry_safe(curr, next, &sparity->spages, list) { | 
 | 2979 | 		list_del_init(&curr->list); | 
 | 2980 | 		scrub_page_put(curr); | 
 | 2981 | 	} | 
 | 2982 |  | 
 | 2983 | 	kfree(sparity); | 
 | 2984 | } | 
 | 2985 |  | 
| Zhao Lei | 20b2e30 | 2015-06-04 20:09:15 +0800 | [diff] [blame] | 2986 | static void scrub_parity_bio_endio_worker(struct btrfs_work *work) | 
 | 2987 | { | 
 | 2988 | 	struct scrub_parity *sparity = container_of(work, struct scrub_parity, | 
 | 2989 | 						    work); | 
 | 2990 | 	struct scrub_ctx *sctx = sparity->sctx; | 
 | 2991 |  | 
 | 2992 | 	scrub_free_parity(sparity); | 
 | 2993 | 	scrub_pending_bio_dec(sctx); | 
 | 2994 | } | 
 | 2995 |  | 
| Christoph Hellwig | 4246a0b | 2015-07-20 15:29:37 +0200 | [diff] [blame] | 2996 | static void scrub_parity_bio_endio(struct bio *bio) | 
| Miao Xie | 5a6ac9e | 2014-11-06 17:20:58 +0800 | [diff] [blame] | 2997 | { | 
 | 2998 | 	struct scrub_parity *sparity = (struct scrub_parity *)bio->bi_private; | 
| Jeff Mahoney | 0b246af | 2016-06-22 18:54:23 -0400 | [diff] [blame] | 2999 | 	struct btrfs_fs_info *fs_info = sparity->sctx->fs_info; | 
| Miao Xie | 5a6ac9e | 2014-11-06 17:20:58 +0800 | [diff] [blame] | 3000 |  | 
| Christoph Hellwig | 4246a0b | 2015-07-20 15:29:37 +0200 | [diff] [blame] | 3001 | 	if (bio->bi_error) | 
| Miao Xie | 5a6ac9e | 2014-11-06 17:20:58 +0800 | [diff] [blame] | 3002 | 		bitmap_or(sparity->ebitmap, sparity->ebitmap, sparity->dbitmap, | 
 | 3003 | 			  sparity->nsectors); | 
 | 3004 |  | 
| Miao Xie | 5a6ac9e | 2014-11-06 17:20:58 +0800 | [diff] [blame] | 3005 | 	bio_put(bio); | 
| Zhao Lei | 20b2e30 | 2015-06-04 20:09:15 +0800 | [diff] [blame] | 3006 |  | 
 | 3007 | 	btrfs_init_work(&sparity->work, btrfs_scrubparity_helper, | 
 | 3008 | 			scrub_parity_bio_endio_worker, NULL, NULL); | 
| Jeff Mahoney | 0b246af | 2016-06-22 18:54:23 -0400 | [diff] [blame] | 3009 | 	btrfs_queue_work(fs_info->scrub_parity_workers, &sparity->work); | 
| Miao Xie | 5a6ac9e | 2014-11-06 17:20:58 +0800 | [diff] [blame] | 3010 | } | 
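/*
 * Note (a likely rationale, not confirmed by this file): bio end_io
 * callbacks may run in interrupt context, so the endio above only
 * records errors in the bitmap and defers the actual teardown,
 * scrub_free_parity() plus the pending-bio decrement, to the
 * scrub_parity_workers workqueue where blocking is allowed.
 */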
 | 3011 |  | 
 | 3012 | static void scrub_parity_check_and_repair(struct scrub_parity *sparity) | 
 | 3013 | { | 
 | 3014 | 	struct scrub_ctx *sctx = sparity->sctx; | 
| Jeff Mahoney | 0b246af | 2016-06-22 18:54:23 -0400 | [diff] [blame] | 3015 | 	struct btrfs_fs_info *fs_info = sctx->fs_info; | 
| Miao Xie | 5a6ac9e | 2014-11-06 17:20:58 +0800 | [diff] [blame] | 3016 | 	struct bio *bio; | 
 | 3017 | 	struct btrfs_raid_bio *rbio; | 
| Miao Xie | 5a6ac9e | 2014-11-06 17:20:58 +0800 | [diff] [blame] | 3018 | 	struct btrfs_bio *bbio = NULL; | 
| Miao Xie | 5a6ac9e | 2014-11-06 17:20:58 +0800 | [diff] [blame] | 3019 | 	u64 length; | 
 | 3020 | 	int ret; | 
 | 3021 |  | 
 | 3022 | 	if (!bitmap_andnot(sparity->dbitmap, sparity->dbitmap, sparity->ebitmap, | 
 | 3023 | 			   sparity->nsectors)) | 
 | 3024 | 		goto out; | 
 | 3025 |  | 
| Zhao Lei | a0dd59d | 2015-07-21 15:42:26 +0800 | [diff] [blame] | 3026 | 	length = sparity->logic_end - sparity->logic_start; | 
| Qu Wenruo | ae6529c | 2017-03-29 09:33:21 +0800 | [diff] [blame] | 3027 |  | 
 | 3028 | 	btrfs_bio_counter_inc_blocked(fs_info); | 
| Jeff Mahoney | 0b246af | 2016-06-22 18:54:23 -0400 | [diff] [blame] | 3029 | 	ret = btrfs_map_sblock(fs_info, BTRFS_MAP_WRITE, sparity->logic_start, | 
| David Sterba | 825ad4c | 2017-03-28 14:45:22 +0200 | [diff] [blame] | 3030 | 			       &length, &bbio); | 
| Zhao Lei | 8e5cfb5 | 2015-01-20 15:11:33 +0800 | [diff] [blame] | 3031 | 	if (ret || !bbio || !bbio->raid_map) | 
| Miao Xie | 5a6ac9e | 2014-11-06 17:20:58 +0800 | [diff] [blame] | 3032 | 		goto bbio_out; | 
 | 3033 |  | 
 | 3034 | 	bio = btrfs_io_bio_alloc(GFP_NOFS, 0); | 
 | 3035 | 	if (!bio) | 
 | 3036 | 		goto bbio_out; | 
 | 3037 |  | 
 | 3038 | 	bio->bi_iter.bi_sector = sparity->logic_start >> 9; | 
 | 3039 | 	bio->bi_private = sparity; | 
 | 3040 | 	bio->bi_end_io = scrub_parity_bio_endio; | 
 | 3041 |  | 
| Jeff Mahoney | 2ff7e61 | 2016-06-22 18:54:24 -0400 | [diff] [blame] | 3042 | 	rbio = raid56_parity_alloc_scrub_rbio(fs_info, bio, bbio, | 
| Zhao Lei | 8e5cfb5 | 2015-01-20 15:11:33 +0800 | [diff] [blame] | 3043 | 					      length, sparity->scrub_dev, | 
| Miao Xie | 5a6ac9e | 2014-11-06 17:20:58 +0800 | [diff] [blame] | 3044 | 					      sparity->dbitmap, | 
 | 3045 | 					      sparity->nsectors); | 
 | 3046 | 	if (!rbio) | 
 | 3047 | 		goto rbio_out; | 
 | 3048 |  | 
| Miao Xie | 5a6ac9e | 2014-11-06 17:20:58 +0800 | [diff] [blame] | 3049 | 	scrub_pending_bio_inc(sctx); | 
 | 3050 | 	raid56_parity_submit_scrub_rbio(rbio); | 
 | 3051 | 	return; | 
 | 3052 |  | 
 | 3053 | rbio_out: | 
 | 3054 | 	bio_put(bio); | 
 | 3055 | bbio_out: | 
| Qu Wenruo | ae6529c | 2017-03-29 09:33:21 +0800 | [diff] [blame] | 3056 | 	btrfs_bio_counter_dec(fs_info); | 
| Zhao Lei | 6e9606d | 2015-01-20 15:11:34 +0800 | [diff] [blame] | 3057 | 	btrfs_put_bbio(bbio); | 
| Miao Xie | 5a6ac9e | 2014-11-06 17:20:58 +0800 | [diff] [blame] | 3058 | 	bitmap_or(sparity->ebitmap, sparity->ebitmap, sparity->dbitmap, | 
 | 3059 | 		  sparity->nsectors); | 
 | 3060 | 	spin_lock(&sctx->stat_lock); | 
 | 3061 | 	sctx->stat.malloc_errors++; | 
 | 3062 | 	spin_unlock(&sctx->stat_lock); | 
 | 3063 | out: | 
 | 3064 | 	scrub_free_parity(sparity); | 
 | 3065 | } | 
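/*
 * Note: bitmap_andnot() above computes dbitmap &= ~ebitmap and returns
 * whether any bit survived, so sectors that already failed are
 * excluded from parity verification and, when nothing is left, the
 * function skips straight to freeing the parity context.  On any setup
 * failure dbitmap is folded back into ebitmap, so those sectors are
 * counted as errors by scrub_free_parity().
 */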
 | 3066 |  | 
 | 3067 | static inline int scrub_calc_parity_bitmap_len(int nsectors) | 
 | 3068 | { | 
| Zhao Lei | bfca9a6 | 2014-12-08 19:55:57 +0800 | [diff] [blame] | 3069 | 	return DIV_ROUND_UP(nsectors, BITS_PER_LONG) * sizeof(long); | 
| Miao Xie | 5a6ac9e | 2014-11-06 17:20:58 +0800 | [diff] [blame] | 3070 | } | 
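/*
 * Worked example (assuming 64-bit longs): for nsectors = 16,
 * DIV_ROUND_UP(16, 64) = 1 long, i.e. 8 bytes per bitmap;
 * scrub_raid56_parity() allocates 2 * bitmap_len so that dbitmap and
 * ebitmap share a single trailing buffer.
 */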
 | 3071 |  | 
 | 3072 | static void scrub_parity_get(struct scrub_parity *sparity) | 
 | 3073 | { | 
| Elena Reshetova | 78a7645 | 2017-03-03 10:55:24 +0200 | [diff] [blame] | 3074 | 	refcount_inc(&sparity->refs); | 
| Miao Xie | 5a6ac9e | 2014-11-06 17:20:58 +0800 | [diff] [blame] | 3075 | } | 
 | 3076 |  | 
 | 3077 | static void scrub_parity_put(struct scrub_parity *sparity) | 
 | 3078 | { | 
| Elena Reshetova | 78a7645 | 2017-03-03 10:55:24 +0200 | [diff] [blame] | 3079 | 	if (!refcount_dec_and_test(&sparity->refs)) | 
| Miao Xie | 5a6ac9e | 2014-11-06 17:20:58 +0800 | [diff] [blame] | 3080 | 		return; | 
 | 3081 |  | 
 | 3082 | 	scrub_parity_check_and_repair(sparity); | 
 | 3083 | } | 
 | 3084 |  | 
 | 3085 | static noinline_for_stack int scrub_raid56_parity(struct scrub_ctx *sctx, | 
 | 3086 | 						  struct map_lookup *map, | 
 | 3087 | 						  struct btrfs_device *sdev, | 
 | 3088 | 						  struct btrfs_path *path, | 
 | 3089 | 						  u64 logic_start, | 
 | 3090 | 						  u64 logic_end) | 
 | 3091 | { | 
| Jeff Mahoney | fb45625 | 2016-06-22 18:54:56 -0400 | [diff] [blame] | 3092 | 	struct btrfs_fs_info *fs_info = sctx->fs_info; | 
| Miao Xie | 5a6ac9e | 2014-11-06 17:20:58 +0800 | [diff] [blame] | 3093 | 	struct btrfs_root *root = fs_info->extent_root; | 
 | 3094 | 	struct btrfs_root *csum_root = fs_info->csum_root; | 
 | 3095 | 	struct btrfs_extent_item *extent; | 
| Omar Sandoval | 4a77089 | 2015-06-19 11:52:52 -0700 | [diff] [blame] | 3096 | 	struct btrfs_bio *bbio = NULL; | 
| Miao Xie | 5a6ac9e | 2014-11-06 17:20:58 +0800 | [diff] [blame] | 3097 | 	u64 flags; | 
 | 3098 | 	int ret; | 
 | 3099 | 	int slot; | 
 | 3100 | 	struct extent_buffer *l; | 
 | 3101 | 	struct btrfs_key key; | 
 | 3102 | 	u64 generation; | 
 | 3103 | 	u64 extent_logical; | 
 | 3104 | 	u64 extent_physical; | 
 | 3105 | 	u64 extent_len; | 
| Omar Sandoval | 4a77089 | 2015-06-19 11:52:52 -0700 | [diff] [blame] | 3106 | 	u64 mapped_length; | 
| Miao Xie | 5a6ac9e | 2014-11-06 17:20:58 +0800 | [diff] [blame] | 3107 | 	struct btrfs_device *extent_dev; | 
 | 3108 | 	struct scrub_parity *sparity; | 
 | 3109 | 	int nsectors; | 
 | 3110 | 	int bitmap_len; | 
 | 3111 | 	int extent_mirror_num; | 
 | 3112 | 	int stop_loop = 0; | 
 | 3113 |  | 
| Jeff Mahoney | 0b246af | 2016-06-22 18:54:23 -0400 | [diff] [blame] | 3114 | 	nsectors = div_u64(map->stripe_len, fs_info->sectorsize); | 
| Miao Xie | 5a6ac9e | 2014-11-06 17:20:58 +0800 | [diff] [blame] | 3115 | 	bitmap_len = scrub_calc_parity_bitmap_len(nsectors); | 
 | 3116 | 	sparity = kzalloc(sizeof(struct scrub_parity) + 2 * bitmap_len, | 
 | 3117 | 			  GFP_NOFS); | 
 | 3118 | 	if (!sparity) { | 
 | 3119 | 		spin_lock(&sctx->stat_lock); | 
 | 3120 | 		sctx->stat.malloc_errors++; | 
 | 3121 | 		spin_unlock(&sctx->stat_lock); | 
 | 3122 | 		return -ENOMEM; | 
 | 3123 | 	} | 
 | 3124 |  | 
 | 3125 | 	sparity->stripe_len = map->stripe_len; | 
 | 3126 | 	sparity->nsectors = nsectors; | 
 | 3127 | 	sparity->sctx = sctx; | 
 | 3128 | 	sparity->scrub_dev = sdev; | 
 | 3129 | 	sparity->logic_start = logic_start; | 
 | 3130 | 	sparity->logic_end = logic_end; | 
| Elena Reshetova | 78a7645 | 2017-03-03 10:55:24 +0200 | [diff] [blame] | 3131 | 	refcount_set(&sparity->refs, 1); | 
| Miao Xie | 5a6ac9e | 2014-11-06 17:20:58 +0800 | [diff] [blame] | 3132 | 	INIT_LIST_HEAD(&sparity->spages); | 
 | 3133 | 	sparity->dbitmap = sparity->bitmap; | 
 | 3134 | 	sparity->ebitmap = (void *)sparity->bitmap + bitmap_len; | 
 | 3135 |  | 
 | 3136 | 	ret = 0; | 
 | 3137 | 	while (logic_start < logic_end) { | 
 | 3138 | 		if (btrfs_fs_incompat(fs_info, SKINNY_METADATA)) | 
 | 3139 | 			key.type = BTRFS_METADATA_ITEM_KEY; | 
 | 3140 | 		else | 
 | 3141 | 			key.type = BTRFS_EXTENT_ITEM_KEY; | 
 | 3142 | 		key.objectid = logic_start; | 
 | 3143 | 		key.offset = (u64)-1; | 
 | 3144 |  | 
 | 3145 | 		ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); | 
 | 3146 | 		if (ret < 0) | 
 | 3147 | 			goto out; | 
 | 3148 |  | 
 | 3149 | 		if (ret > 0) { | 
 | 3150 | 			ret = btrfs_previous_extent_item(root, path, 0); | 
 | 3151 | 			if (ret < 0) | 
 | 3152 | 				goto out; | 
 | 3153 | 			if (ret > 0) { | 
 | 3154 | 				btrfs_release_path(path); | 
 | 3155 | 				ret = btrfs_search_slot(NULL, root, &key, | 
 | 3156 | 							path, 0, 0); | 
 | 3157 | 				if (ret < 0) | 
 | 3158 | 					goto out; | 
 | 3159 | 			} | 
 | 3160 | 		} | 
 | 3161 |  | 
 | 3162 | 		stop_loop = 0; | 
 | 3163 | 		while (1) { | 
 | 3164 | 			u64 bytes; | 
 | 3165 |  | 
 | 3166 | 			l = path->nodes[0]; | 
 | 3167 | 			slot = path->slots[0]; | 
 | 3168 | 			if (slot >= btrfs_header_nritems(l)) { | 
 | 3169 | 				ret = btrfs_next_leaf(root, path); | 
 | 3170 | 				if (ret == 0) | 
 | 3171 | 					continue; | 
 | 3172 | 				if (ret < 0) | 
 | 3173 | 					goto out; | 
 | 3174 |  | 
 | 3175 | 				stop_loop = 1; | 
 | 3176 | 				break; | 
 | 3177 | 			} | 
 | 3178 | 			btrfs_item_key_to_cpu(l, &key, slot); | 
 | 3179 |  | 
| Zhao Lei | d7cad23 | 2015-07-22 13:14:48 +0800 | [diff] [blame] | 3180 | 			if (key.type != BTRFS_EXTENT_ITEM_KEY && | 
 | 3181 | 			    key.type != BTRFS_METADATA_ITEM_KEY) | 
 | 3182 | 				goto next; | 
 | 3183 |  | 
| Miao Xie | 5a6ac9e | 2014-11-06 17:20:58 +0800 | [diff] [blame] | 3184 | 			if (key.type == BTRFS_METADATA_ITEM_KEY) | 
| Jeff Mahoney | 0b246af | 2016-06-22 18:54:23 -0400 | [diff] [blame] | 3185 | 				bytes = fs_info->nodesize; | 
| Miao Xie | 5a6ac9e | 2014-11-06 17:20:58 +0800 | [diff] [blame] | 3186 | 			else | 
 | 3187 | 				bytes = key.offset; | 
 | 3188 |  | 
 | 3189 | 			if (key.objectid + bytes <= logic_start) | 
 | 3190 | 				goto next; | 
 | 3191 |  | 
| Zhao Lei | a0dd59d | 2015-07-21 15:42:26 +0800 | [diff] [blame] | 3192 | 			if (key.objectid >= logic_end) { | 
| Miao Xie | 5a6ac9e | 2014-11-06 17:20:58 +0800 | [diff] [blame] | 3193 | 				stop_loop = 1; | 
 | 3194 | 				break; | 
 | 3195 | 			} | 
 | 3196 |  | 
 | 3197 | 			while (key.objectid >= logic_start + map->stripe_len) | 
 | 3198 | 				logic_start += map->stripe_len; | 
 | 3199 |  | 
 | 3200 | 			extent = btrfs_item_ptr(l, slot, | 
 | 3201 | 						struct btrfs_extent_item); | 
 | 3202 | 			flags = btrfs_extent_flags(l, extent); | 
 | 3203 | 			generation = btrfs_extent_generation(l, extent); | 
 | 3204 |  | 
| Zhao Lei | a323e81 | 2015-07-23 12:29:49 +0800 | [diff] [blame] | 3205 | 			if ((flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) && | 
 | 3206 | 			    (key.objectid < logic_start || | 
 | 3207 | 			     key.objectid + bytes > | 
 | 3208 | 			     logic_start + map->stripe_len)) { | 
| Jeff Mahoney | 5d163e0 | 2016-09-20 10:05:00 -0400 | [diff] [blame] | 3209 | 				btrfs_err(fs_info, | 
 | 3210 | 					  "scrub: tree block %llu spanning stripes, ignored. logical=%llu", | 
| Zhao Lei | a323e81 | 2015-07-23 12:29:49 +0800 | [diff] [blame] | 3211 | 					  key.objectid, logic_start); | 
| Zhao Lei | 9799d2c3 | 2015-08-25 21:31:40 +0800 | [diff] [blame] | 3212 | 				spin_lock(&sctx->stat_lock); | 
 | 3213 | 				sctx->stat.uncorrectable_errors++; | 
 | 3214 | 				spin_unlock(&sctx->stat_lock); | 
| Miao Xie | 5a6ac9e | 2014-11-06 17:20:58 +0800 | [diff] [blame] | 3215 | 				goto next; | 
 | 3216 | 			} | 
 | 3217 | again: | 
 | 3218 | 			extent_logical = key.objectid; | 
 | 3219 | 			extent_len = bytes; | 
 | 3220 |  | 
 | 3221 | 			if (extent_logical < logic_start) { | 
 | 3222 | 				extent_len -= logic_start - extent_logical; | 
 | 3223 | 				extent_logical = logic_start; | 
 | 3224 | 			} | 
 | 3225 |  | 
 | 3226 | 			if (extent_logical + extent_len > | 
 | 3227 | 			    logic_start + map->stripe_len) | 
 | 3228 | 				extent_len = logic_start + map->stripe_len - | 
 | 3229 | 					     extent_logical; | 
 | 3230 |  | 
 | 3231 | 			scrub_parity_mark_sectors_data(sparity, extent_logical, | 
 | 3232 | 						       extent_len); | 
 | 3233 |  | 
| Omar Sandoval | 4a77089 | 2015-06-19 11:52:52 -0700 | [diff] [blame] | 3234 | 			mapped_length = extent_len; | 
| Zhao Lei | f1fee65 | 2016-05-17 17:37:38 +0800 | [diff] [blame] | 3235 | 			bbio = NULL; | 
| Christoph Hellwig | cf8cddd | 2016-10-27 09:27:36 +0200 | [diff] [blame] | 3236 | 			ret = btrfs_map_block(fs_info, BTRFS_MAP_READ, | 
 | 3237 | 					extent_logical, &mapped_length, &bbio, | 
 | 3238 | 					0); | 
| Omar Sandoval | 4a77089 | 2015-06-19 11:52:52 -0700 | [diff] [blame] | 3239 | 			if (!ret) { | 
 | 3240 | 				if (!bbio || mapped_length < extent_len) | 
 | 3241 | 					ret = -EIO; | 
 | 3242 | 			} | 
 | 3243 | 			if (ret) { | 
 | 3244 | 				btrfs_put_bbio(bbio); | 
 | 3245 | 				goto out; | 
 | 3246 | 			} | 
 | 3247 | 			extent_physical = bbio->stripes[0].physical; | 
 | 3248 | 			extent_mirror_num = bbio->mirror_num; | 
 | 3249 | 			extent_dev = bbio->stripes[0].dev; | 
 | 3250 | 			btrfs_put_bbio(bbio); | 
| Miao Xie | 5a6ac9e | 2014-11-06 17:20:58 +0800 | [diff] [blame] | 3251 |  | 
 | 3252 | 			ret = btrfs_lookup_csums_range(csum_root, | 
 | 3253 | 						extent_logical, | 
 | 3254 | 						extent_logical + extent_len - 1, | 
 | 3255 | 						&sctx->csum_list, 1); | 
 | 3256 | 			if (ret) | 
 | 3257 | 				goto out; | 
 | 3258 |  | 
 | 3259 | 			ret = scrub_extent_for_parity(sparity, extent_logical, | 
 | 3260 | 						      extent_len, | 
 | 3261 | 						      extent_physical, | 
 | 3262 | 						      extent_dev, flags, | 
 | 3263 | 						      generation, | 
 | 3264 | 						      extent_mirror_num); | 
| Zhao Lei | 6fa96d7 | 2015-07-21 12:22:30 +0800 | [diff] [blame] | 3265 |  | 
 | 3266 | 			scrub_free_csums(sctx); | 
 | 3267 |  | 
| Miao Xie | 5a6ac9e | 2014-11-06 17:20:58 +0800 | [diff] [blame] | 3268 | 			if (ret) | 
 | 3269 | 				goto out; | 
 | 3270 |  | 
| Miao Xie | 5a6ac9e | 2014-11-06 17:20:58 +0800 | [diff] [blame] | 3271 | 			if (extent_logical + extent_len < | 
 | 3272 | 			    key.objectid + bytes) { | 
 | 3273 | 				logic_start += map->stripe_len; | 
 | 3274 |  | 
 | 3275 | 				if (logic_start >= logic_end) { | 
 | 3276 | 					stop_loop = 1; | 
 | 3277 | 					break; | 
 | 3278 | 				} | 
 | 3279 |  | 
 | 3280 | 				if (logic_start < key.objectid + bytes) { | 
 | 3281 | 					cond_resched(); | 
 | 3282 | 					goto again; | 
 | 3283 | 				} | 
 | 3284 | 			} | 
 | 3285 | next: | 
 | 3286 | 			path->slots[0]++; | 
 | 3287 | 		} | 
 | 3288 |  | 
 | 3289 | 		btrfs_release_path(path); | 
 | 3290 |  | 
 | 3291 | 		if (stop_loop) | 
 | 3292 | 			break; | 
 | 3293 |  | 
 | 3294 | 		logic_start += map->stripe_len; | 
 | 3295 | 	} | 
 | 3296 | out: | 
 | 3297 | 	if (ret < 0) | 
 | 3298 | 		scrub_parity_mark_sectors_error(sparity, logic_start, | 
| Zhao Lei | a0dd59d | 2015-07-21 15:42:26 +0800 | [diff] [blame] | 3299 | 						logic_end - logic_start); | 
| Miao Xie | 5a6ac9e | 2014-11-06 17:20:58 +0800 | [diff] [blame] | 3300 | 	scrub_parity_put(sparity); | 
 | 3301 | 	scrub_submit(sctx); | 
 | 3302 | 	mutex_lock(&sctx->wr_ctx.wr_lock); | 
 | 3303 | 	scrub_wr_submit(sctx); | 
 | 3304 | 	mutex_unlock(&sctx->wr_ctx.wr_lock); | 
 | 3305 |  | 
 | 3306 | 	btrfs_release_path(path); | 
 | 3307 | 	return ret < 0 ? ret : 0; | 
 | 3308 | } | 
 | 3309 |  | 
| Stefan Behrens | d9d181c | 2012-11-02 09:58:09 +0100 | [diff] [blame] | 3310 | static noinline_for_stack int scrub_stripe(struct scrub_ctx *sctx, | 
| Stefan Behrens | a36cf8b | 2012-11-02 13:26:57 +0100 | [diff] [blame] | 3311 | 					   struct map_lookup *map, | 
 | 3312 | 					   struct btrfs_device *scrub_dev, | 
| Stefan Behrens | ff023aa | 2012-11-06 11:43:11 +0100 | [diff] [blame] | 3313 | 					   int num, u64 base, u64 length, | 
 | 3314 | 					   int is_dev_replace) | 
| Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 3315 | { | 
| Miao Xie | 5a6ac9e | 2014-11-06 17:20:58 +0800 | [diff] [blame] | 3316 | 	struct btrfs_path *path, *ppath; | 
| Jeff Mahoney | fb45625 | 2016-06-22 18:54:56 -0400 | [diff] [blame] | 3317 | 	struct btrfs_fs_info *fs_info = sctx->fs_info; | 
| Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 3318 | 	struct btrfs_root *root = fs_info->extent_root; | 
 | 3319 | 	struct btrfs_root *csum_root = fs_info->csum_root; | 
 | 3320 | 	struct btrfs_extent_item *extent; | 
| Arne Jansen | e7786c3 | 2011-05-28 20:58:38 +0000 | [diff] [blame] | 3321 | 	struct blk_plug plug; | 
| Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 3322 | 	u64 flags; | 
 | 3323 | 	int ret; | 
 | 3324 | 	int slot; | 
| Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 3325 | 	u64 nstripes; | 
| Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 3326 | 	struct extent_buffer *l; | 
| Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 3327 | 	u64 physical; | 
 | 3328 | 	u64 logical; | 
| Liu Bo | 625f1c8d | 2013-04-27 02:56:57 +0000 | [diff] [blame] | 3329 | 	u64 logic_end; | 
| Wang Shilong | 3b080b2 | 2014-04-01 18:01:43 +0800 | [diff] [blame] | 3330 | 	u64 physical_end; | 
| Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 3331 | 	u64 generation; | 
| Jan Schmidt | e12fa9c | 2011-06-17 15:55:21 +0200 | [diff] [blame] | 3332 | 	int mirror_num; | 
| Arne Jansen | 7a26285 | 2011-06-10 12:39:23 +0200 | [diff] [blame] | 3333 | 	struct reada_control *reada1; | 
 | 3334 | 	struct reada_control *reada2; | 
| David Sterba | e6c11f9 | 2016-03-24 18:00:53 +0100 | [diff] [blame] | 3335 | 	struct btrfs_key key; | 
| Arne Jansen | 7a26285 | 2011-06-10 12:39:23 +0200 | [diff] [blame] | 3336 | 	struct btrfs_key key_end; | 
| Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 3337 | 	u64 increment = map->stripe_len; | 
 | 3338 | 	u64 offset; | 
| Stefan Behrens | ff023aa | 2012-11-06 11:43:11 +0100 | [diff] [blame] | 3339 | 	u64 extent_logical; | 
 | 3340 | 	u64 extent_physical; | 
 | 3341 | 	u64 extent_len; | 
| Miao Xie | 5a6ac9e | 2014-11-06 17:20:58 +0800 | [diff] [blame] | 3342 | 	u64 stripe_logical; | 
 | 3343 | 	u64 stripe_end; | 
| Stefan Behrens | ff023aa | 2012-11-06 11:43:11 +0100 | [diff] [blame] | 3344 | 	struct btrfs_device *extent_dev; | 
 | 3345 | 	int extent_mirror_num; | 
| Wang Shilong | 3b080b2 | 2014-04-01 18:01:43 +0800 | [diff] [blame] | 3346 | 	int stop_loop = 0; | 
| David Woodhouse | 53b381b | 2013-01-29 18:40:14 -0500 | [diff] [blame] | 3347 |  | 
| Wang Shilong | 3b080b2 | 2014-04-01 18:01:43 +0800 | [diff] [blame] | 3348 | 	physical = map->stripes[num].physical; | 
| Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 3349 | 	offset = 0; | 
| Liu Bo | 42c61ab | 2017-04-03 13:45:24 -0700 | [diff] [blame] | 3350 | 	nstripes = div64_u64(length, map->stripe_len); | 
| Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 3351 | 	if (map->type & BTRFS_BLOCK_GROUP_RAID0) { | 
 | 3352 | 		offset = map->stripe_len * num; | 
 | 3353 | 		increment = map->stripe_len * map->num_stripes; | 
| Jan Schmidt | 193ea74 | 2011-06-13 19:56:54 +0200 | [diff] [blame] | 3354 | 		mirror_num = 1; | 
| Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 3355 | 	} else if (map->type & BTRFS_BLOCK_GROUP_RAID10) { | 
 | 3356 | 		int factor = map->num_stripes / map->sub_stripes; | 
 | 3357 | 		offset = map->stripe_len * (num / map->sub_stripes); | 
 | 3358 | 		increment = map->stripe_len * factor; | 
| Jan Schmidt | 193ea74 | 2011-06-13 19:56:54 +0200 | [diff] [blame] | 3359 | 		mirror_num = num % map->sub_stripes + 1; | 
| Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 3360 | 	} else if (map->type & BTRFS_BLOCK_GROUP_RAID1) { | 
 | 3361 | 		increment = map->stripe_len; | 
| Jan Schmidt | 193ea74 | 2011-06-13 19:56:54 +0200 | [diff] [blame] | 3362 | 		mirror_num = num % map->num_stripes + 1; | 
| Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 3363 | 	} else if (map->type & BTRFS_BLOCK_GROUP_DUP) { | 
 | 3364 | 		increment = map->stripe_len; | 
| Jan Schmidt | 193ea74 | 2011-06-13 19:56:54 +0200 | [diff] [blame] | 3365 | 		mirror_num = num % map->num_stripes + 1; | 
| Zhao Lei | ffe2d20 | 2015-01-20 15:11:44 +0800 | [diff] [blame] | 3366 | 	} else if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) { | 
| Miao Xie | 5a6ac9e | 2014-11-06 17:20:58 +0800 | [diff] [blame] | 3367 | 		get_raid56_logic_offset(physical, num, map, &offset, NULL); | 
| Wang Shilong | 3b080b2 | 2014-04-01 18:01:43 +0800 | [diff] [blame] | 3368 | 		increment = map->stripe_len * nr_data_stripes(map); | 
 | 3369 | 		mirror_num = 1; | 
| Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 3370 | 	} else { | 
 | 3371 | 		increment = map->stripe_len; | 
| Jan Schmidt | 193ea74 | 2011-06-13 19:56:54 +0200 | [diff] [blame] | 3372 | 		mirror_num = 1; | 
| Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 3373 | 	} | 
 | 3374 |  | 
 | 3375 | 	path = btrfs_alloc_path(); | 
 | 3376 | 	if (!path) | 
 | 3377 | 		return -ENOMEM; | 
 | 3378 |  | 
| Miao Xie | 5a6ac9e | 2014-11-06 17:20:58 +0800 | [diff] [blame] | 3379 | 	ppath = btrfs_alloc_path(); | 
 | 3380 | 	if (!ppath) { | 
| Tsutomu Itoh | 379d685 | 2015-01-09 17:37:52 +0900 | [diff] [blame] | 3381 | 		btrfs_free_path(path); | 
| Miao Xie | 5a6ac9e | 2014-11-06 17:20:58 +0800 | [diff] [blame] | 3382 | 		return -ENOMEM; | 
 | 3383 | 	} | 
 | 3384 |  | 
| Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 3385 | 	/* | 
 | 3386 | 	 * work on the commit root. The related disk blocks are static as | 
 | 3387 | 	 * long as COW is applied. This means it is safe to rewrite | 
 | 3388 | 	 * them to repair disk errors without any race conditions. | 
 | 3389 | 	 */ | 
| Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 3390 | 	path->search_commit_root = 1; | 
 | 3391 | 	path->skip_locking = 1; | 
 | 3392 |  | 
| Gui Hecheng | 063c54d | 2015-01-09 09:39:40 +0800 | [diff] [blame] | 3393 | 	ppath->search_commit_root = 1; | 
 | 3394 | 	ppath->skip_locking = 1; | 
| Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 3395 | 	/* | 
| Arne Jansen | 7a26285 | 2011-06-10 12:39:23 +0200 | [diff] [blame] | 3396 | 	 * trigger the readahead for the extent tree and csum tree and wait | 
 | 3397 | 	 * for completion. During readahead, the scrub is officially paused | 
 | 3398 | 	 * so as not to hold off transaction commits. | 
| Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 3399 | 	 */ | 
 | 3400 | 	logical = base + offset; | 
| Wang Shilong | 3b080b2 | 2014-04-01 18:01:43 +0800 | [diff] [blame] | 3401 | 	physical_end = physical + nstripes * map->stripe_len; | 
| Zhao Lei | ffe2d20 | 2015-01-20 15:11:44 +0800 | [diff] [blame] | 3402 | 	if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) { | 
| Wang Shilong | 3b080b2 | 2014-04-01 18:01:43 +0800 | [diff] [blame] | 3403 | 		get_raid56_logic_offset(physical_end, num, | 
| Miao Xie | 5a6ac9e | 2014-11-06 17:20:58 +0800 | [diff] [blame] | 3404 | 					map, &logic_end, NULL); | 
| Wang Shilong | 3b080b2 | 2014-04-01 18:01:43 +0800 | [diff] [blame] | 3405 | 		logic_end += base; | 
 | 3406 | 	} else { | 
 | 3407 | 		logic_end = logical + increment * nstripes; | 
 | 3408 | 	} | 
| Stefan Behrens | d9d181c | 2012-11-02 09:58:09 +0100 | [diff] [blame] | 3409 | 	wait_event(sctx->list_wait, | 
| Stefan Behrens | b6bfebc | 2012-11-02 16:44:58 +0100 | [diff] [blame] | 3410 | 		   atomic_read(&sctx->bios_in_flight) == 0); | 
| Wang Shilong | cb7ab02 | 2013-12-04 21:16:53 +0800 | [diff] [blame] | 3411 | 	scrub_blocked_if_needed(fs_info); | 
| Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 3412 |  | 
| Arne Jansen | 7a26285 | 2011-06-10 12:39:23 +0200 | [diff] [blame] | 3413 | 	/* FIXME it might be better to start readahead at commit root */ | 
| David Sterba | e6c11f9 | 2016-03-24 18:00:53 +0100 | [diff] [blame] | 3414 | 	key.objectid = logical; | 
 | 3415 | 	key.type = BTRFS_EXTENT_ITEM_KEY; | 
 | 3416 | 	key.offset = (u64)0; | 
| Wang Shilong | 3b080b2 | 2014-04-01 18:01:43 +0800 | [diff] [blame] | 3417 | 	key_end.objectid = logic_end; | 
| Josef Bacik | 3173a18 | 2013-03-07 14:22:04 -0500 | [diff] [blame] | 3418 | 	key_end.type = BTRFS_METADATA_ITEM_KEY; | 
 | 3419 | 	key_end.offset = (u64)-1; | 
| David Sterba | e6c11f9 | 2016-03-24 18:00:53 +0100 | [diff] [blame] | 3420 | 	reada1 = btrfs_reada_add(root, &key, &key_end); | 
| Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 3421 |  | 
| David Sterba | e6c11f9 | 2016-03-24 18:00:53 +0100 | [diff] [blame] | 3422 | 	key.objectid = BTRFS_EXTENT_CSUM_OBJECTID; | 
 | 3423 | 	key.type = BTRFS_EXTENT_CSUM_KEY; | 
 | 3424 | 	key.offset = logical; | 
| Arne Jansen | 7a26285 | 2011-06-10 12:39:23 +0200 | [diff] [blame] | 3425 | 	key_end.objectid = BTRFS_EXTENT_CSUM_OBJECTID; | 
 | 3426 | 	key_end.type = BTRFS_EXTENT_CSUM_KEY; | 
| Wang Shilong | 3b080b2 | 2014-04-01 18:01:43 +0800 | [diff] [blame] | 3427 | 	key_end.offset = logic_end; | 
| David Sterba | e6c11f9 | 2016-03-24 18:00:53 +0100 | [diff] [blame] | 3428 | 	reada2 = btrfs_reada_add(csum_root, &key, &key_end); | 
| Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 3429 |  | 
| Arne Jansen | 7a26285 | 2011-06-10 12:39:23 +0200 | [diff] [blame] | 3430 | 	if (!IS_ERR(reada1)) | 
 | 3431 | 		btrfs_reada_wait(reada1); | 
 | 3432 | 	if (!IS_ERR(reada2)) | 
 | 3433 | 		btrfs_reada_wait(reada2); | 
| Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 3434 |  | 
| Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 3435 |  | 
 | 3436 | 	/* | 
 | 3437 | 	 * collect all data csums for the stripe to avoid seeking during | 
 | 3438 | 	 * the scrub. This might currently (crc32) end up being about 1MB | 
 | 3439 | 	 */ | 
| Arne Jansen | e7786c3 | 2011-05-28 20:58:38 +0000 | [diff] [blame] | 3440 | 	blk_start_plug(&plug); | 
| Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 3441 |  | 
| Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 3442 | 	/* | 
 | 3443 | 	 * now find all extents for each stripe and scrub them | 
 | 3444 | 	 */ | 
| Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 3445 | 	ret = 0; | 
| Wang Shilong | 3b080b2 | 2014-04-01 18:01:43 +0800 | [diff] [blame] | 3446 | 	while (physical < physical_end) { | 
| Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 3447 | 		/* | 
 | 3448 | 		 * canceled? | 
 | 3449 | 		 */ | 
 | 3450 | 		if (atomic_read(&fs_info->scrub_cancel_req) || | 
| Stefan Behrens | d9d181c | 2012-11-02 09:58:09 +0100 | [diff] [blame] | 3451 | 		    atomic_read(&sctx->cancel_req)) { | 
| Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 3452 | 			ret = -ECANCELED; | 
 | 3453 | 			goto out; | 
 | 3454 | 		} | 
 | 3455 | 		/* | 
 | 3456 | 		 * check to see if we have to pause | 
 | 3457 | 		 */ | 
 | 3458 | 		if (atomic_read(&fs_info->scrub_pause_req)) { | 
 | 3459 | 			/* push queued extents */ | 
| Stefan Behrens | ff023aa | 2012-11-06 11:43:11 +0100 | [diff] [blame] | 3460 | 			atomic_set(&sctx->wr_ctx.flush_all_writes, 1); | 
| Stefan Behrens | d9d181c | 2012-11-02 09:58:09 +0100 | [diff] [blame] | 3461 | 			scrub_submit(sctx); | 
| Stefan Behrens | ff023aa | 2012-11-06 11:43:11 +0100 | [diff] [blame] | 3462 | 			mutex_lock(&sctx->wr_ctx.wr_lock); | 
 | 3463 | 			scrub_wr_submit(sctx); | 
 | 3464 | 			mutex_unlock(&sctx->wr_ctx.wr_lock); | 
| Stefan Behrens | d9d181c | 2012-11-02 09:58:09 +0100 | [diff] [blame] | 3465 | 			wait_event(sctx->list_wait, | 
| Stefan Behrens | b6bfebc | 2012-11-02 16:44:58 +0100 | [diff] [blame] | 3466 | 				   atomic_read(&sctx->bios_in_flight) == 0); | 
| Stefan Behrens | ff023aa | 2012-11-06 11:43:11 +0100 | [diff] [blame] | 3467 | 			atomic_set(&sctx->wr_ctx.flush_all_writes, 0); | 
| Wang Shilong | 3cb0929 | 2013-12-04 21:15:19 +0800 | [diff] [blame] | 3468 | 			scrub_blocked_if_needed(fs_info); | 
| Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 3469 | 		} | 
 | 3470 |  | 
| Zhao Lei | f2f66a2 | 2015-07-21 12:22:29 +0800 | [diff] [blame] | 3471 | 		if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) { | 
 | 3472 | 			ret = get_raid56_logic_offset(physical, num, map, | 
 | 3473 | 						      &logical, | 
 | 3474 | 						      &stripe_logical); | 
 | 3475 | 			logical += base; | 
 | 3476 | 			if (ret) { | 
| Zhao Lei | 7955323 | 2015-08-18 17:54:30 +0800 | [diff] [blame] | 3477 | 				/* it is a parity stripe */ | 
| Zhao Lei | f2f66a2 | 2015-07-21 12:22:29 +0800 | [diff] [blame] | 3478 | 				stripe_logical += base; | 
| Zhao Lei | a0dd59d | 2015-07-21 15:42:26 +0800 | [diff] [blame] | 3479 | 				stripe_end = stripe_logical + increment; | 
| Zhao Lei | f2f66a2 | 2015-07-21 12:22:29 +0800 | [diff] [blame] | 3480 | 				ret = scrub_raid56_parity(sctx, map, scrub_dev, | 
 | 3481 | 							  ppath, stripe_logical, | 
 | 3482 | 							  stripe_end); | 
 | 3483 | 				if (ret) | 
 | 3484 | 					goto out; | 
 | 3485 | 				goto skip; | 
 | 3486 | 			} | 
 | 3487 | 		} | 
 | 3488 |  | 
| Wang Shilong | 7c76edb | 2014-01-12 21:38:32 +0800 | [diff] [blame] | 3489 | 		if (btrfs_fs_incompat(fs_info, SKINNY_METADATA)) | 
 | 3490 | 			key.type = BTRFS_METADATA_ITEM_KEY; | 
 | 3491 | 		else | 
 | 3492 | 			key.type = BTRFS_EXTENT_ITEM_KEY; | 
| Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 3493 | 		key.objectid = logical; | 
| Liu Bo | 625f1c8d | 2013-04-27 02:56:57 +0000 | [diff] [blame] | 3494 | 		key.offset = (u64)-1; | 
| Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 3495 |  | 
 | 3496 | 		ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); | 
 | 3497 | 		if (ret < 0) | 
 | 3498 | 			goto out; | 
| Josef Bacik | 3173a18 | 2013-03-07 14:22:04 -0500 | [diff] [blame] | 3499 |  | 
| Arne Jansen | 8c51032 | 2011-06-03 10:09:26 +0200 | [diff] [blame] | 3500 | 		if (ret > 0) { | 
| Wang Shilong | ade2e0b | 2014-01-12 21:38:33 +0800 | [diff] [blame] | 3501 | 			ret = btrfs_previous_extent_item(root, path, 0); | 
| Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 3502 | 			if (ret < 0) | 
 | 3503 | 				goto out; | 
| Arne Jansen | 8c51032 | 2011-06-03 10:09:26 +0200 | [diff] [blame] | 3504 | 			if (ret > 0) { | 
 | 3505 | 				/* there's no smaller item, so stick with the | 
 | 3506 | 				 * larger one */ | 
 | 3507 | 				btrfs_release_path(path); | 
 | 3508 | 				ret = btrfs_search_slot(NULL, root, &key, | 
 | 3509 | 							path, 0, 0); | 
 | 3510 | 				if (ret < 0) | 
 | 3511 | 					goto out; | 
 | 3512 | 			} | 
| Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 3513 | 		} | 
 | 3514 |  | 
| Liu Bo | 625f1c8d | 2013-04-27 02:56:57 +0000 | [diff] [blame] | 3515 | 		stop_loop = 0; | 
| Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 3516 | 		while (1) { | 
| Josef Bacik | 3173a18 | 2013-03-07 14:22:04 -0500 | [diff] [blame] | 3517 | 			u64 bytes; | 
 | 3518 |  | 
| Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 3519 | 			l = path->nodes[0]; | 
 | 3520 | 			slot = path->slots[0]; | 
 | 3521 | 			if (slot >= btrfs_header_nritems(l)) { | 
 | 3522 | 				ret = btrfs_next_leaf(root, path); | 
 | 3523 | 				if (ret == 0) | 
 | 3524 | 					continue; | 
 | 3525 | 				if (ret < 0) | 
 | 3526 | 					goto out; | 
 | 3527 |  | 
| Liu Bo | 625f1c8d | 2013-04-27 02:56:57 +0000 | [diff] [blame] | 3528 | 				stop_loop = 1; | 
| Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 3529 | 				break; | 
 | 3530 | 			} | 
 | 3531 | 			btrfs_item_key_to_cpu(l, &key, slot); | 
 | 3532 |  | 
| Zhao Lei | d7cad23 | 2015-07-22 13:14:48 +0800 | [diff] [blame] | 3533 | 			if (key.type != BTRFS_EXTENT_ITEM_KEY && | 
 | 3534 | 			    key.type != BTRFS_METADATA_ITEM_KEY) | 
 | 3535 | 				goto next; | 
 | 3536 |  | 
| Josef Bacik | 3173a18 | 2013-03-07 14:22:04 -0500 | [diff] [blame] | 3537 | 			if (key.type == BTRFS_METADATA_ITEM_KEY) | 
| Jeff Mahoney | 0b246af | 2016-06-22 18:54:23 -0400 | [diff] [blame] | 3538 | 				bytes = fs_info->nodesize; | 
| Josef Bacik | 3173a18 | 2013-03-07 14:22:04 -0500 | [diff] [blame] | 3539 | 			else | 
 | 3540 | 				bytes = key.offset; | 
 | 3541 |  | 
 | 3542 | 			if (key.objectid + bytes <= logical) | 
| Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 3543 | 				goto next; | 
 | 3544 |  | 
| Liu Bo | 625f1c8d | 2013-04-27 02:56:57 +0000 | [diff] [blame] | 3545 | 			if (key.objectid >= logical + map->stripe_len) { | 
 | 3546 | 				/* out of this device extent */ | 
 | 3547 | 				if (key.objectid >= logic_end) | 
 | 3548 | 					stop_loop = 1; | 
 | 3549 | 				break; | 
 | 3550 | 			} | 
| Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 3551 |  | 
 | 3552 | 			extent = btrfs_item_ptr(l, slot, | 
 | 3553 | 						struct btrfs_extent_item); | 
 | 3554 | 			flags = btrfs_extent_flags(l, extent); | 
 | 3555 | 			generation = btrfs_extent_generation(l, extent); | 
 | 3556 |  | 
| Zhao Lei | a323e81 | 2015-07-23 12:29:49 +0800 | [diff] [blame] | 3557 | 			if ((flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) && | 
 | 3558 | 			    (key.objectid < logical || | 
 | 3559 | 			     key.objectid + bytes > | 
 | 3560 | 			     logical + map->stripe_len)) { | 
| Frank Holton | efe120a | 2013-12-20 11:37:06 -0500 | [diff] [blame] | 3561 | 				btrfs_err(fs_info, | 
| Jeff Mahoney | 5d163e0 | 2016-09-20 10:05:00 -0400 | [diff] [blame] | 3562 | 					   "scrub: tree block %llu spanning stripes, ignored. logical=%llu", | 
| Geert Uytterhoeven | c1c9ff7 | 2013-08-20 13:20:07 +0200 | [diff] [blame] | 3563 | 				       key.objectid, logical); | 
| Zhao Lei | 9799d2c3 | 2015-08-25 21:31:40 +0800 | [diff] [blame] | 3564 | 				spin_lock(&sctx->stat_lock); | 
 | 3565 | 				sctx->stat.uncorrectable_errors++; | 
 | 3566 | 				spin_unlock(&sctx->stat_lock); | 
| Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 3567 | 				goto next; | 
 | 3568 | 			} | 
 | 3569 |  | 
| Liu Bo | 625f1c8d | 2013-04-27 02:56:57 +0000 | [diff] [blame] | 3570 | again: | 
 | 3571 | 			extent_logical = key.objectid; | 
 | 3572 | 			extent_len = bytes; | 
 | 3573 |  | 
| Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 3574 | 			/* | 
 | 3575 | 			 * trim extent to this stripe | 
 | 3576 | 			 */ | 
| Liu Bo | 625f1c8d | 2013-04-27 02:56:57 +0000 | [diff] [blame] | 3577 | 			if (extent_logical < logical) { | 
 | 3578 | 				extent_len -= logical - extent_logical; | 
 | 3579 | 				extent_logical = logical; | 
| Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 3580 | 			} | 
| Liu Bo | 625f1c8d | 2013-04-27 02:56:57 +0000 | [diff] [blame] | 3581 | 			if (extent_logical + extent_len > | 
| Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 3582 | 			    logical + map->stripe_len) { | 
| Liu Bo | 625f1c8d | 2013-04-27 02:56:57 +0000 | [diff] [blame] | 3583 | 				extent_len = logical + map->stripe_len - | 
 | 3584 | 					     extent_logical; | 
| Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 3585 | 			} | 
 | 3586 |  | 
| Liu Bo | 625f1c8d | 2013-04-27 02:56:57 +0000 | [diff] [blame] | 3587 | 			extent_physical = extent_logical - logical + physical; | 
| Stefan Behrens | ff023aa | 2012-11-06 11:43:11 +0100 | [diff] [blame] | 3588 | 			extent_dev = scrub_dev; | 
 | 3589 | 			extent_mirror_num = mirror_num; | 
 | 3590 | 			if (is_dev_replace) | 
 | 3591 | 				scrub_remap_extent(fs_info, extent_logical, | 
 | 3592 | 						   extent_len, &extent_physical, | 
 | 3593 | 						   &extent_dev, | 
 | 3594 | 						   &extent_mirror_num); | 
| Liu Bo | 625f1c8d | 2013-04-27 02:56:57 +0000 | [diff] [blame] | 3595 |  | 
| Zhao Lei | fe8cf65 | 2015-07-22 13:14:47 +0800 | [diff] [blame] | 3596 | 			ret = btrfs_lookup_csums_range(csum_root, | 
 | 3597 | 						       extent_logical, | 
 | 3598 | 						       extent_logical + | 
 | 3599 | 						       extent_len - 1, | 
 | 3600 | 						       &sctx->csum_list, 1); | 
| Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 3601 | 			if (ret) | 
 | 3602 | 				goto out; | 
 | 3603 |  | 
| Liu Bo | 625f1c8d | 2013-04-27 02:56:57 +0000 | [diff] [blame] | 3604 | 			ret = scrub_extent(sctx, extent_logical, extent_len, | 
 | 3605 | 					   extent_physical, extent_dev, flags, | 
 | 3606 | 					   generation, extent_mirror_num, | 
| Stefan Behrens | 115930c | 2013-07-04 16:14:23 +0200 | [diff] [blame] | 3607 | 					   extent_logical - logical + physical); | 
| Zhao Lei | 6fa96d7 | 2015-07-21 12:22:30 +0800 | [diff] [blame] | 3608 |  | 
 | 3609 | 			scrub_free_csums(sctx); | 
 | 3610 |  | 
| Liu Bo | 625f1c8d | 2013-04-27 02:56:57 +0000 | [diff] [blame] | 3611 | 			if (ret) | 
 | 3612 | 				goto out; | 
 | 3613 |  | 
 | 3614 | 			if (extent_logical + extent_len < | 
 | 3615 | 			    key.objectid + bytes) { | 
| Zhao Lei | ffe2d20 | 2015-01-20 15:11:44 +0800 | [diff] [blame] | 3616 | 				if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) { | 
| Wang Shilong | 3b080b2 | 2014-04-01 18:01:43 +0800 | [diff] [blame] | 3617 | 					/* | 
 | 3618 | 					 * loop until we find the next data | 
 | 3619 | 					 * stripe or have finished all stripes. | 
 | 3620 | 					 */ | 
| Miao Xie | 5a6ac9e | 2014-11-06 17:20:58 +0800 | [diff] [blame] | 3621 | loop: | 
 | 3622 | 					physical += map->stripe_len; | 
 | 3623 | 					ret = get_raid56_logic_offset(physical, | 
 | 3624 | 							num, map, &logical, | 
 | 3625 | 							&stripe_logical); | 
 | 3626 | 					logical += base; | 
 | 3627 |  | 
 | 3628 | 					if (ret && physical < physical_end) { | 
 | 3629 | 						stripe_logical += base; | 
 | 3630 | 						stripe_end = stripe_logical + | 
| Zhao Lei | a0dd59d | 2015-07-21 15:42:26 +0800 | [diff] [blame] | 3631 | 								increment; | 
| Miao Xie | 5a6ac9e | 2014-11-06 17:20:58 +0800 | [diff] [blame] | 3632 | 						ret = scrub_raid56_parity(sctx, | 
 | 3633 | 							map, scrub_dev, ppath, | 
 | 3634 | 							stripe_logical, | 
 | 3635 | 							stripe_end); | 
 | 3636 | 						if (ret) | 
 | 3637 | 							goto out; | 
 | 3638 | 						goto loop; | 
 | 3639 | 					} | 
| Wang Shilong | 3b080b2 | 2014-04-01 18:01:43 +0800 | [diff] [blame] | 3640 | 				} else { | 
 | 3641 | 					physical += map->stripe_len; | 
 | 3642 | 					logical += increment; | 
 | 3643 | 				} | 
| Liu Bo | 625f1c8d | 2013-04-27 02:56:57 +0000 | [diff] [blame] | 3644 | 				if (logical < key.objectid + bytes) { | 
 | 3645 | 					cond_resched(); | 
 | 3646 | 					goto again; | 
 | 3647 | 				} | 
 | 3648 |  | 
| Wang Shilong | 3b080b2 | 2014-04-01 18:01:43 +0800 | [diff] [blame] | 3649 | 				if (physical >= physical_end) { | 
| Liu Bo | 625f1c8d | 2013-04-27 02:56:57 +0000 | [diff] [blame] | 3650 | 					stop_loop = 1; | 
 | 3651 | 					break; | 
 | 3652 | 				} | 
 | 3653 | 			} | 
| Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 3654 | next: | 
 | 3655 | 			path->slots[0]++; | 
 | 3656 | 		} | 
| Chris Mason | 7126733 | 2011-05-23 06:30:52 -0400 | [diff] [blame] | 3657 | 		btrfs_release_path(path); | 
| Wang Shilong | 3b080b2 | 2014-04-01 18:01:43 +0800 | [diff] [blame] | 3658 | skip: | 
| Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 3659 | 		logical += increment; | 
 | 3660 | 		physical += map->stripe_len; | 
| Stefan Behrens | d9d181c | 2012-11-02 09:58:09 +0100 | [diff] [blame] | 3661 | 		spin_lock(&sctx->stat_lock); | 
| Liu Bo | 625f1c8d | 2013-04-27 02:56:57 +0000 | [diff] [blame] | 3662 | 		if (stop_loop) | 
 | 3663 | 			sctx->stat.last_physical = map->stripes[num].physical + | 
 | 3664 | 						   length; | 
 | 3665 | 		else | 
 | 3666 | 			sctx->stat.last_physical = physical; | 
| Stefan Behrens | d9d181c | 2012-11-02 09:58:09 +0100 | [diff] [blame] | 3667 | 		spin_unlock(&sctx->stat_lock); | 
| Liu Bo | 625f1c8d | 2013-04-27 02:56:57 +0000 | [diff] [blame] | 3668 | 		if (stop_loop) | 
 | 3669 | 			break; | 
| Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 3670 | 	} | 
| Stefan Behrens | ff023aa | 2012-11-06 11:43:11 +0100 | [diff] [blame] | 3671 | out: | 
| Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 3672 | 	/* push queued extents */ | 
| Stefan Behrens | d9d181c | 2012-11-02 09:58:09 +0100 | [diff] [blame] | 3673 | 	scrub_submit(sctx); | 
| Stefan Behrens | ff023aa | 2012-11-06 11:43:11 +0100 | [diff] [blame] | 3674 | 	mutex_lock(&sctx->wr_ctx.wr_lock); | 
 | 3675 | 	scrub_wr_submit(sctx); | 
 | 3676 | 	mutex_unlock(&sctx->wr_ctx.wr_lock); | 
| Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 3677 |  | 
| Arne Jansen | e7786c3 | 2011-05-28 20:58:38 +0000 | [diff] [blame] | 3678 | 	blk_finish_plug(&plug); | 
| Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 3679 | 	btrfs_free_path(path); | 
| Miao Xie | 5a6ac9e | 2014-11-06 17:20:58 +0800 | [diff] [blame] | 3680 | 	btrfs_free_path(ppath); | 
| Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 3681 | 	return ret < 0 ? ret : 0; | 
 | 3682 | } | 
 | 3683 |  | 
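Editorial note: the clamping of extent_logical and extent_len inside scrub_stripe() above is plain interval arithmetic: an extent found in the extent tree may start before the current stripe or run past its end, so it is trimmed to the intersection of the extent range and [logical, logical + map->stripe_len). A minimal standalone sketch of that trimming, using hypothetical plain integers instead of the kernel structures:

#include <stdint.h>
#include <stdio.h>

/*
 * Trim [ext_start, ext_start + ext_len) to the stripe
 * [stripe_start, stripe_start + stripe_len); returns 0 if they do not overlap.
 */
static int trim_to_stripe(uint64_t *ext_start, uint64_t *ext_len,
			  uint64_t stripe_start, uint64_t stripe_len)
{
	uint64_t stripe_end = stripe_start + stripe_len;

	if (*ext_start + *ext_len <= stripe_start || *ext_start >= stripe_end)
		return 0;			/* no overlap at all */
	if (*ext_start < stripe_start) {
		*ext_len -= stripe_start - *ext_start;
		*ext_start = stripe_start;
	}
	if (*ext_start + *ext_len > stripe_end)
		*ext_len = stripe_end - *ext_start;
	return 1;
}

int main(void)
{
	uint64_t start = 900, len = 300;	/* extent 900..1200 */

	/* stripe 1024..1152: the sketch prints the trimmed range 1024..1152 */
	if (trim_to_stripe(&start, &len, 1024, 128))
		printf("trimmed to %llu..%llu\n",
		       (unsigned long long)start,
		       (unsigned long long)(start + len));
	return 0;
}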
| Stefan Behrens | d9d181c | 2012-11-02 09:58:09 +0100 | [diff] [blame] | 3684 | static noinline_for_stack int scrub_chunk(struct scrub_ctx *sctx, | 
| Stefan Behrens | a36cf8b | 2012-11-02 13:26:57 +0100 | [diff] [blame] | 3685 | 					  struct btrfs_device *scrub_dev, | 
| Stefan Behrens | a36cf8b | 2012-11-02 13:26:57 +0100 | [diff] [blame] | 3686 | 					  u64 chunk_offset, u64 length, | 
| Filipe Manana | 020d5b7 | 2015-11-19 10:57:20 +0000 | [diff] [blame] | 3687 | 					  u64 dev_offset, | 
 | 3688 | 					  struct btrfs_block_group_cache *cache, | 
 | 3689 | 					  int is_dev_replace) | 
| Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 3690 | { | 
| Jeff Mahoney | fb45625 | 2016-06-22 18:54:56 -0400 | [diff] [blame] | 3691 | 	struct btrfs_fs_info *fs_info = sctx->fs_info; | 
 | 3692 | 	struct btrfs_mapping_tree *map_tree = &fs_info->mapping_tree; | 
| Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 3693 | 	struct map_lookup *map; | 
 | 3694 | 	struct extent_map *em; | 
 | 3695 | 	int i; | 
| Stefan Behrens | ff023aa | 2012-11-06 11:43:11 +0100 | [diff] [blame] | 3696 | 	int ret = 0; | 
| Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 3697 |  | 
 | 3698 | 	read_lock(&map_tree->map_tree.lock); | 
 | 3699 | 	em = lookup_extent_mapping(&map_tree->map_tree, chunk_offset, 1); | 
 | 3700 | 	read_unlock(&map_tree->map_tree.lock); | 
 | 3701 |  | 
| Filipe Manana | 020d5b7 | 2015-11-19 10:57:20 +0000 | [diff] [blame] | 3702 | 	if (!em) { | 
 | 3703 | 		/* | 
 | 3704 | 		 * Might have been an unused block group deleted by the cleaner | 
 | 3705 | 		 * kthread or relocation. | 
 | 3706 | 		 */ | 
 | 3707 | 		spin_lock(&cache->lock); | 
 | 3708 | 		if (!cache->removed) | 
 | 3709 | 			ret = -EINVAL; | 
 | 3710 | 		spin_unlock(&cache->lock); | 
 | 3711 |  | 
 | 3712 | 		return ret; | 
 | 3713 | 	} | 
| Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 3714 |  | 
| Jeff Mahoney | 95617d6 | 2015-06-03 10:55:48 -0400 | [diff] [blame] | 3715 | 	map = em->map_lookup; | 
| Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 3716 | 	if (em->start != chunk_offset) | 
 | 3717 | 		goto out; | 
 | 3718 |  | 
 | 3719 | 	if (em->len < length) | 
 | 3720 | 		goto out; | 
 | 3721 |  | 
 | 3722 | 	for (i = 0; i < map->num_stripes; ++i) { | 
| Stefan Behrens | a36cf8b | 2012-11-02 13:26:57 +0100 | [diff] [blame] | 3723 | 		if (map->stripes[i].dev->bdev == scrub_dev->bdev && | 
| Arne Jansen | 859acaf | 2012-02-09 15:09:02 +0100 | [diff] [blame] | 3724 | 		    map->stripes[i].physical == dev_offset) { | 
| Stefan Behrens | a36cf8b | 2012-11-02 13:26:57 +0100 | [diff] [blame] | 3725 | 			ret = scrub_stripe(sctx, map, scrub_dev, i, | 
| Stefan Behrens | ff023aa | 2012-11-06 11:43:11 +0100 | [diff] [blame] | 3726 | 					   chunk_offset, length, | 
 | 3727 | 					   is_dev_replace); | 
| Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 3728 | 			if (ret) | 
 | 3729 | 				goto out; | 
 | 3730 | 		} | 
 | 3731 | 	} | 
 | 3732 | out: | 
 | 3733 | 	free_extent_map(em); | 
 | 3734 |  | 
 | 3735 | 	return ret; | 
 | 3736 | } | 
 | 3737 |  | 
 | 3738 | static noinline_for_stack | 
| Stefan Behrens | a36cf8b | 2012-11-02 13:26:57 +0100 | [diff] [blame] | 3739 | int scrub_enumerate_chunks(struct scrub_ctx *sctx, | 
| Stefan Behrens | ff023aa | 2012-11-06 11:43:11 +0100 | [diff] [blame] | 3740 | 			   struct btrfs_device *scrub_dev, u64 start, u64 end, | 
 | 3741 | 			   int is_dev_replace) | 
| Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 3742 | { | 
 | 3743 | 	struct btrfs_dev_extent *dev_extent = NULL; | 
 | 3744 | 	struct btrfs_path *path; | 
| Jeff Mahoney | 0b246af | 2016-06-22 18:54:23 -0400 | [diff] [blame] | 3745 | 	struct btrfs_fs_info *fs_info = sctx->fs_info; | 
 | 3746 | 	struct btrfs_root *root = fs_info->dev_root; | 
| Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 3747 | 	u64 length; | 
| Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 3748 | 	u64 chunk_offset; | 
| Zhaolei | 55e3a60 | 2015-08-05 16:43:30 +0800 | [diff] [blame] | 3749 | 	int ret = 0; | 
| Zhaolei | 76a8efa | 2015-11-17 18:46:17 +0800 | [diff] [blame] | 3750 | 	int ro_set; | 
| Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 3751 | 	int slot; | 
 | 3752 | 	struct extent_buffer *l; | 
 | 3753 | 	struct btrfs_key key; | 
 | 3754 | 	struct btrfs_key found_key; | 
 | 3755 | 	struct btrfs_block_group_cache *cache; | 
| Stefan Behrens | ff023aa | 2012-11-06 11:43:11 +0100 | [diff] [blame] | 3756 | 	struct btrfs_dev_replace *dev_replace = &fs_info->dev_replace; | 
| Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 3757 |  | 
 | 3758 | 	path = btrfs_alloc_path(); | 
 | 3759 | 	if (!path) | 
 | 3760 | 		return -ENOMEM; | 
 | 3761 |  | 
| David Sterba | e4058b5 | 2015-11-27 16:31:35 +0100 | [diff] [blame] | 3762 | 	path->reada = READA_FORWARD; | 
| Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 3763 | 	path->search_commit_root = 1; | 
 | 3764 | 	path->skip_locking = 1; | 
 | 3765 |  | 
| Stefan Behrens | a36cf8b | 2012-11-02 13:26:57 +0100 | [diff] [blame] | 3766 | 	key.objectid = scrub_dev->devid; | 
| Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 3767 | 	key.offset = 0ull; | 
 | 3768 | 	key.type = BTRFS_DEV_EXTENT_KEY; | 
 | 3769 |  | 
| Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 3770 | 	while (1) { | 
 | 3771 | 		ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); | 
 | 3772 | 		if (ret < 0) | 
| Arne Jansen | 8c51032 | 2011-06-03 10:09:26 +0200 | [diff] [blame] | 3773 | 			break; | 
 | 3774 | 		if (ret > 0) { | 
 | 3775 | 			if (path->slots[0] >= | 
 | 3776 | 			    btrfs_header_nritems(path->nodes[0])) { | 
 | 3777 | 				ret = btrfs_next_leaf(root, path); | 
| Zhaolei | 55e3a60 | 2015-08-05 16:43:30 +0800 | [diff] [blame] | 3778 | 				if (ret < 0) | 
| Arne Jansen | 8c51032 | 2011-06-03 10:09:26 +0200 | [diff] [blame] | 3779 | 					break; | 
| Zhaolei | 55e3a60 | 2015-08-05 16:43:30 +0800 | [diff] [blame] | 3780 | 				if (ret > 0) { | 
 | 3781 | 					ret = 0; | 
 | 3782 | 					break; | 
 | 3783 | 				} | 
 | 3784 | 			} else { | 
 | 3785 | 				ret = 0; | 
| Arne Jansen | 8c51032 | 2011-06-03 10:09:26 +0200 | [diff] [blame] | 3786 | 			} | 
 | 3787 | 		} | 
| Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 3788 |  | 
 | 3789 | 		l = path->nodes[0]; | 
 | 3790 | 		slot = path->slots[0]; | 
 | 3791 |  | 
 | 3792 | 		btrfs_item_key_to_cpu(l, &found_key, slot); | 
 | 3793 |  | 
| Stefan Behrens | a36cf8b | 2012-11-02 13:26:57 +0100 | [diff] [blame] | 3794 | 		if (found_key.objectid != scrub_dev->devid) | 
| Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 3795 | 			break; | 
 | 3796 |  | 
| David Sterba | 962a298 | 2014-06-04 18:41:45 +0200 | [diff] [blame] | 3797 | 		if (found_key.type != BTRFS_DEV_EXTENT_KEY) | 
| Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 3798 | 			break; | 
 | 3799 |  | 
 | 3800 | 		if (found_key.offset >= end) | 
 | 3801 | 			break; | 
 | 3802 |  | 
 | 3803 | 		if (found_key.offset < key.offset) | 
 | 3804 | 			break; | 
 | 3805 |  | 
 | 3806 | 		dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent); | 
 | 3807 | 		length = btrfs_dev_extent_length(l, dev_extent); | 
 | 3808 |  | 
| Qu Wenruo | ced96ed | 2014-06-19 10:42:51 +0800 | [diff] [blame] | 3809 | 		if (found_key.offset + length <= start) | 
 | 3810 | 			goto skip; | 
| Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 3811 |  | 
| Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 3812 | 		chunk_offset = btrfs_dev_extent_chunk_offset(l, dev_extent); | 
 | 3813 |  | 
 | 3814 | 		/* | 
 | 3815 | 		 * get a reference on the corresponding block group to prevent | 
 | 3816 | 		 * the chunk from going away while we scrub it | 
 | 3817 | 		 */ | 
 | 3818 | 		cache = btrfs_lookup_block_group(fs_info, chunk_offset); | 
| Qu Wenruo | ced96ed | 2014-06-19 10:42:51 +0800 | [diff] [blame] | 3819 |  | 
 | 3820 | 		/* Some chunks are removed but not yet committed to disk; | 
 | 3821 | 		 * continue scrubbing. */ | 
 | 3822 | 		if (!cache) | 
 | 3823 | 			goto skip; | 
 | 3824 |  | 
| Zhaolei | 55e3a60 | 2015-08-05 16:43:30 +0800 | [diff] [blame] | 3825 | 		/* | 
 | 3826 | 		 * We need to call btrfs_inc_block_group_ro() with scrubs_paused, | 
 | 3827 | 		 * to avoid a deadlock caused by: | 
 | 3828 | 		 * btrfs_inc_block_group_ro() | 
 | 3829 | 		 * -> btrfs_wait_for_commit() | 
 | 3830 | 		 * -> btrfs_commit_transaction() | 
 | 3831 | 		 * -> btrfs_scrub_pause() | 
 | 3832 | 		 */ | 
 | 3833 | 		scrub_pause_on(fs_info); | 
| Jeff Mahoney | 5e00f19 | 2017-02-15 16:28:29 -0500 | [diff] [blame] | 3834 | 		ret = btrfs_inc_block_group_ro(fs_info, cache); | 
| Filipe Manana | f0e9b7d | 2016-05-14 09:12:53 +0100 | [diff] [blame] | 3835 | 		if (!ret && is_dev_replace) { | 
 | 3836 | 			/* | 
 | 3837 | 			 * If we are doing a device replace wait for any tasks | 
 | 3838 | 			 * that started delalloc right before we set the block | 
 | 3839 | 			 * group to RO mode, as they might have just allocated | 
 | 3840 | 			 * an extent from it or decided they could do a nocow | 
 | 3841 | 			 * write. And if any such tasks did that, wait for their | 
 | 3842 | 			 * ordered extents to complete and then commit the | 
 | 3843 | 			 * current transaction, so that we can later see the new | 
 | 3844 | 			 * extent items in the extent tree - the ordered extents | 
 | 3845 | 			 * create delayed data references (for cow writes) when | 
 | 3846 | 			 * they complete, which will be run and insert the | 
 | 3847 | 			 * corresponding extent items into the extent tree when | 
 | 3848 | 			 * we commit the transaction they used when running | 
 | 3849 | 			 * inode.c:btrfs_finish_ordered_io(). We later use | 
 | 3850 | 			 * the commit root of the extent tree to find extents | 
 | 3851 | 			 * to copy from the srcdev into the tgtdev, and we don't | 
 | 3852 | 			 * want to miss any new extents. | 
 | 3853 | 			 */ | 
 | 3854 | 			btrfs_wait_block_group_reservations(cache); | 
 | 3855 | 			btrfs_wait_nocow_writers(cache); | 
 | 3856 | 			ret = btrfs_wait_ordered_roots(fs_info, -1, | 
 | 3857 | 						       cache->key.objectid, | 
 | 3858 | 						       cache->key.offset); | 
 | 3859 | 			if (ret > 0) { | 
 | 3860 | 				struct btrfs_trans_handle *trans; | 
 | 3861 |  | 
 | 3862 | 				trans = btrfs_join_transaction(root); | 
 | 3863 | 				if (IS_ERR(trans)) | 
 | 3864 | 					ret = PTR_ERR(trans); | 
 | 3865 | 				else | 
| Jeff Mahoney | 3a45bb2 | 2016-09-09 21:39:03 -0400 | [diff] [blame] | 3866 | 					ret = btrfs_commit_transaction(trans); | 
| Filipe Manana | f0e9b7d | 2016-05-14 09:12:53 +0100 | [diff] [blame] | 3867 | 				if (ret) { | 
 | 3868 | 					scrub_pause_off(fs_info); | 
 | 3869 | 					btrfs_put_block_group(cache); | 
 | 3870 | 					break; | 
 | 3871 | 				} | 
 | 3872 | 			} | 
 | 3873 | 		} | 
| Zhaolei | 55e3a60 | 2015-08-05 16:43:30 +0800 | [diff] [blame] | 3874 | 		scrub_pause_off(fs_info); | 
| Zhaolei | 76a8efa | 2015-11-17 18:46:17 +0800 | [diff] [blame] | 3875 |  | 
 | 3876 | 		if (ret == 0) { | 
 | 3877 | 			ro_set = 1; | 
 | 3878 | 		} else if (ret == -ENOSPC) { | 
 | 3879 | 			/* | 
 | 3880 | 			 * btrfs_inc_block_group_ro returns -ENOSPC when it | 
 | 3881 | 			 * fails to create a new chunk for metadata. | 
 | 3882 | 			 * It is not a problem for scrub/replace, because | 
 | 3883 | 			 * metadata is always cowed, and our scrub pauses | 
 | 3884 | 			 * transaction commits. | 
 | 3885 | 			 */ | 
 | 3886 | 			ro_set = 0; | 
 | 3887 | 		} else { | 
| Jeff Mahoney | 5d163e0 | 2016-09-20 10:05:00 -0400 | [diff] [blame] | 3888 | 			btrfs_warn(fs_info, | 
 | 3889 | 				   "failed setting block group ro, ret=%d", | 
| Zhaolei | 76a8efa | 2015-11-17 18:46:17 +0800 | [diff] [blame] | 3890 | 				   ret); | 
| Zhaolei | 55e3a60 | 2015-08-05 16:43:30 +0800 | [diff] [blame] | 3891 | 			btrfs_put_block_group(cache); | 
 | 3892 | 			break; | 
 | 3893 | 		} | 
 | 3894 |  | 
| Filipe Manana | 81e87a7 | 2016-05-14 16:32:35 +0100 | [diff] [blame] | 3895 | 		btrfs_dev_replace_lock(&fs_info->dev_replace, 1); | 
| Stefan Behrens | ff023aa | 2012-11-06 11:43:11 +0100 | [diff] [blame] | 3896 | 		dev_replace->cursor_right = found_key.offset + length; | 
 | 3897 | 		dev_replace->cursor_left = found_key.offset; | 
 | 3898 | 		dev_replace->item_needs_writeback = 1; | 
| Filipe Manana | 81e87a7 | 2016-05-14 16:32:35 +0100 | [diff] [blame] | 3899 | 		btrfs_dev_replace_unlock(&fs_info->dev_replace, 1); | 
| Zhao Lei | 8c204c9 | 2015-08-19 15:02:40 +0800 | [diff] [blame] | 3900 | 		ret = scrub_chunk(sctx, scrub_dev, chunk_offset, length, | 
| Filipe Manana | 020d5b7 | 2015-11-19 10:57:20 +0000 | [diff] [blame] | 3901 | 				  found_key.offset, cache, is_dev_replace); | 
| Stefan Behrens | ff023aa | 2012-11-06 11:43:11 +0100 | [diff] [blame] | 3902 |  | 
 | 3903 | 		/* | 
 | 3904 | 		 * Flush and submit all pending read and write bios, and | 
 | 3905 | 		 * afterwards wait for them. | 
 | 3906 | 		 * Note that in the dev replace case, a read request causes | 
 | 3907 | 		 * write requests that are submitted in the read completion | 
 | 3908 | 		 * worker. Therefore in the current situation, it is required | 
 | 3909 | 		 * that all write requests are flushed, so that all read and | 
 | 3910 | 		 * write requests are really completed when bios_in_flight | 
 | 3911 | 		 * changes to 0. | 
 | 3912 | 		 */ | 
 | 3913 | 		atomic_set(&sctx->wr_ctx.flush_all_writes, 1); | 
 | 3914 | 		scrub_submit(sctx); | 
 | 3915 | 		mutex_lock(&sctx->wr_ctx.wr_lock); | 
 | 3916 | 		scrub_wr_submit(sctx); | 
 | 3917 | 		mutex_unlock(&sctx->wr_ctx.wr_lock); | 
 | 3918 |  | 
 | 3919 | 		wait_event(sctx->list_wait, | 
 | 3920 | 			   atomic_read(&sctx->bios_in_flight) == 0); | 
| Zhaolei | b708ce9 | 2015-08-05 16:43:29 +0800 | [diff] [blame] | 3921 |  | 
 | 3922 | 		scrub_pause_on(fs_info); | 
| Wang Shilong | 12cf937 | 2014-02-19 19:24:17 +0800 | [diff] [blame] | 3923 |  | 
 | 3924 | 		/* | 
 | 3925 | 		 * Must be called before we decrease @scrub_paused. | 
 | 3926 | 		 * Make sure we don't block transaction commit while | 
 | 3927 | 		 * we are waiting for pending workers to finish. | 
 | 3928 | 		 */ | 
| Stefan Behrens | ff023aa | 2012-11-06 11:43:11 +0100 | [diff] [blame] | 3929 | 		wait_event(sctx->list_wait, | 
 | 3930 | 			   atomic_read(&sctx->workers_pending) == 0); | 
| Wang Shilong | 12cf937 | 2014-02-19 19:24:17 +0800 | [diff] [blame] | 3931 | 		atomic_set(&sctx->wr_ctx.flush_all_writes, 0); | 
 | 3932 |  | 
| Zhaolei | b708ce9 | 2015-08-05 16:43:29 +0800 | [diff] [blame] | 3933 | 		scrub_pause_off(fs_info); | 
| Stefan Behrens | ff023aa | 2012-11-06 11:43:11 +0100 | [diff] [blame] | 3934 |  | 
| Filipe Manana | 1a1a8b7 | 2016-05-14 19:44:40 +0100 | [diff] [blame] | 3935 | 		btrfs_dev_replace_lock(&fs_info->dev_replace, 1); | 
 | 3936 | 		dev_replace->cursor_left = dev_replace->cursor_right; | 
 | 3937 | 		dev_replace->item_needs_writeback = 1; | 
 | 3938 | 		btrfs_dev_replace_unlock(&fs_info->dev_replace, 1); | 
 | 3939 |  | 
| Zhaolei | 76a8efa | 2015-11-17 18:46:17 +0800 | [diff] [blame] | 3940 | 		if (ro_set) | 
| Jeff Mahoney | 2ff7e61 | 2016-06-22 18:54:24 -0400 | [diff] [blame] | 3941 | 			btrfs_dec_block_group_ro(cache); | 
| Stefan Behrens | ff023aa | 2012-11-06 11:43:11 +0100 | [diff] [blame] | 3942 |  | 
| Filipe Manana | 758f2df | 2015-11-19 11:45:48 +0000 | [diff] [blame] | 3943 | 		/* | 
 | 3944 | 		 * We might have prevented the cleaner kthread from deleting | 
 | 3945 | 		 * this block group if it was already unused because we raced | 
 | 3946 | 		 * and set it to RO mode first. So add it back to the unused | 
 | 3947 | 		 * list, otherwise it might not ever be deleted unless a manual | 
 | 3948 | 		 * balance is triggered or it becomes used and unused again. | 
 | 3949 | 		 */ | 
 | 3950 | 		spin_lock(&cache->lock); | 
 | 3951 | 		if (!cache->removed && !cache->ro && cache->reserved == 0 && | 
 | 3952 | 		    btrfs_block_group_used(&cache->item) == 0) { | 
 | 3953 | 			spin_unlock(&cache->lock); | 
 | 3954 | 			spin_lock(&fs_info->unused_bgs_lock); | 
 | 3955 | 			if (list_empty(&cache->bg_list)) { | 
 | 3956 | 				btrfs_get_block_group(cache); | 
 | 3957 | 				list_add_tail(&cache->bg_list, | 
 | 3958 | 					      &fs_info->unused_bgs); | 
 | 3959 | 			} | 
 | 3960 | 			spin_unlock(&fs_info->unused_bgs_lock); | 
 | 3961 | 		} else { | 
 | 3962 | 			spin_unlock(&cache->lock); | 
 | 3963 | 		} | 
 | 3964 |  | 
| Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 3965 | 		btrfs_put_block_group(cache); | 
 | 3966 | 		if (ret) | 
 | 3967 | 			break; | 
| Stefan Behrens | af1be4f | 2012-11-27 17:39:51 +0000 | [diff] [blame] | 3968 | 		if (is_dev_replace && | 
 | 3969 | 		    atomic64_read(&dev_replace->num_write_errors) > 0) { | 
| Stefan Behrens | ff023aa | 2012-11-06 11:43:11 +0100 | [diff] [blame] | 3970 | 			ret = -EIO; | 
 | 3971 | 			break; | 
 | 3972 | 		} | 
 | 3973 | 		if (sctx->stat.malloc_errors > 0) { | 
 | 3974 | 			ret = -ENOMEM; | 
 | 3975 | 			break; | 
 | 3976 | 		} | 
| Qu Wenruo | ced96ed | 2014-06-19 10:42:51 +0800 | [diff] [blame] | 3977 | skip: | 
| Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 3978 | 		key.offset = found_key.offset + length; | 
| Chris Mason | 7126733 | 2011-05-23 06:30:52 -0400 | [diff] [blame] | 3979 | 		btrfs_release_path(path); | 
| Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 3980 | 	} | 
 | 3981 |  | 
| Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 3982 | 	btrfs_free_path(path); | 
| Arne Jansen | 8c51032 | 2011-06-03 10:09:26 +0200 | [diff] [blame] | 3983 |  | 
| Zhaolei | 55e3a60 | 2015-08-05 16:43:30 +0800 | [diff] [blame] | 3984 | 	return ret; | 
| Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 3985 | } | 
 | 3986 |  | 
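Editorial note: scrub_enumerate_chunks() above walks the device tree with the usual btrfs resume-key pattern: search for the next DEV_EXTENT item of the scrubbed device, process the block group it belongs to, and at the skip: label set key.offset to found_key.offset + length before releasing the path and searching again. A rough standalone illustration of that pattern over a sorted array standing in for the tree; the lookup helper and the extent records are invented for the example and are not btrfs APIs:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct dev_extent { uint64_t offset; uint64_t length; };

/* Stand-in for the tree search: first extent with offset >= key. */
static const struct dev_extent *find_ge(const struct dev_extent *tab,
					size_t n, uint64_t key)
{
	for (size_t i = 0; i < n; i++)
		if (tab[i].offset >= key)
			return &tab[i];
	return NULL;
}

int main(void)
{
	static const struct dev_extent extents[] = {
		{ 1048576, 8388608 }, { 9437184, 8388608 }, { 17825792, 4194304 },
	};
	uint64_t key = 0;
	const struct dev_extent *de;

	/* Process one dev extent per iteration, then resume after its end. */
	while ((de = find_ge(extents, sizeof(extents) / sizeof(extents[0]),
			     key)) != NULL) {
		printf("scrub dev extent at %llu, len %llu\n",
		       (unsigned long long)de->offset,
		       (unsigned long long)de->length);
		key = de->offset + de->length;	/* the "skip:" step above */
	}
	return 0;
}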
| Stefan Behrens | a36cf8b | 2012-11-02 13:26:57 +0100 | [diff] [blame] | 3987 | static noinline_for_stack int scrub_supers(struct scrub_ctx *sctx, | 
 | 3988 | 					   struct btrfs_device *scrub_dev) | 
| Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 3989 | { | 
 | 3990 | 	int	i; | 
 | 3991 | 	u64	bytenr; | 
 | 3992 | 	u64	gen; | 
 | 3993 | 	int	ret; | 
| Jeff Mahoney | 0b246af | 2016-06-22 18:54:23 -0400 | [diff] [blame] | 3994 | 	struct btrfs_fs_info *fs_info = sctx->fs_info; | 
| Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 3995 |  | 
| Jeff Mahoney | 0b246af | 2016-06-22 18:54:23 -0400 | [diff] [blame] | 3996 | 	if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state)) | 
| Jeff Mahoney | 79787ea | 2012-03-12 16:03:00 +0100 | [diff] [blame] | 3997 | 		return -EIO; | 
 | 3998 |  | 
| Miao Xie | 5f54606 | 2014-07-24 11:37:09 +0800 | [diff] [blame] | 3999 | 	/* Seed devices of a new filesystem have their own generation. */ | 
| Jeff Mahoney | 0b246af | 2016-06-22 18:54:23 -0400 | [diff] [blame] | 4000 | 	if (scrub_dev->fs_devices != fs_info->fs_devices) | 
| Miao Xie | 5f54606 | 2014-07-24 11:37:09 +0800 | [diff] [blame] | 4001 | 		gen = scrub_dev->generation; | 
 | 4002 | 	else | 
| Jeff Mahoney | 0b246af | 2016-06-22 18:54:23 -0400 | [diff] [blame] | 4003 | 		gen = fs_info->last_trans_committed; | 
| Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 4004 |  | 
 | 4005 | 	for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) { | 
 | 4006 | 		bytenr = btrfs_sb_offset(i); | 
| Miao Xie | 935e5cc | 2014-09-03 21:35:33 +0800 | [diff] [blame] | 4007 | 		if (bytenr + BTRFS_SUPER_INFO_SIZE > | 
 | 4008 | 		    scrub_dev->commit_total_bytes) | 
| Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 4009 | 			break; | 
 | 4010 |  | 
| Stefan Behrens | d9d181c | 2012-11-02 09:58:09 +0100 | [diff] [blame] | 4011 | 		ret = scrub_pages(sctx, bytenr, BTRFS_SUPER_INFO_SIZE, bytenr, | 
| Stefan Behrens | a36cf8b | 2012-11-02 13:26:57 +0100 | [diff] [blame] | 4012 | 				  scrub_dev, BTRFS_EXTENT_FLAG_SUPER, gen, i, | 
| Stefan Behrens | ff023aa | 2012-11-06 11:43:11 +0100 | [diff] [blame] | 4013 | 				  NULL, 1, bytenr); | 
| Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 4014 | 		if (ret) | 
 | 4015 | 			return ret; | 
 | 4016 | 	} | 
| Stefan Behrens | b6bfebc | 2012-11-02 16:44:58 +0100 | [diff] [blame] | 4017 | 	wait_event(sctx->list_wait, atomic_read(&sctx->bios_in_flight) == 0); | 
| Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 4018 |  | 
 | 4019 | 	return 0; | 
 | 4020 | } | 
 | 4021 |  | 
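Editorial note: scrub_supers() above checks every super block mirror that still fits below commit_total_bytes. The mirror locations come from btrfs_sb_offset(); from memory of the btrfs on-disk format (not from this file), the primary copy sits at 64KiB and each further mirror is 16KiB shifted left by 12 bits per mirror number, i.e. 64MiB and 256GiB. A tiny sketch of that arithmetic, under that assumption:

#include <stdint.h>
#include <stdio.h>

#define SUPER_INFO_OFFSET	(64 * 1024ULL)	/* primary copy at 64KiB */
#define SUPER_MIRROR_MAX	3
#define SUPER_MIRROR_SHIFT	12

static uint64_t sb_offset(int mirror)
{
	uint64_t start = 16 * 1024ULL;

	if (mirror)
		return start << (SUPER_MIRROR_SHIFT * mirror);
	return SUPER_INFO_OFFSET;
}

int main(void)
{
	/* Prints 65536, 67108864, 274877906944 (64KiB, 64MiB, 256GiB). */
	for (int i = 0; i < SUPER_MIRROR_MAX; i++)
		printf("mirror %d at %llu\n", i,
		       (unsigned long long)sb_offset(i));
	return 0;
}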
 | 4022 | /* | 
 | 4023 |  * Get a reference count on fs_info->scrub_workers; start workers if necessary. | 
 | 4024 |  */ | 
| Stefan Behrens | ff023aa | 2012-11-06 11:43:11 +0100 | [diff] [blame] | 4025 | static noinline_for_stack int scrub_workers_get(struct btrfs_fs_info *fs_info, | 
 | 4026 | 						int is_dev_replace) | 
| Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 4027 | { | 
| David Sterba | 6f01105 | 2015-02-16 18:34:01 +0100 | [diff] [blame] | 4028 | 	unsigned int flags = WQ_FREEZABLE | WQ_UNBOUND; | 
| Qu Wenruo | 0339ef2 | 2014-02-28 10:46:17 +0800 | [diff] [blame] | 4029 | 	int max_active = fs_info->thread_pool_size; | 
| Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 4030 |  | 
| Arne Jansen | 632dd77 | 2011-06-10 12:07:07 +0200 | [diff] [blame] | 4031 | 	if (fs_info->scrub_workers_refcnt == 0) { | 
| Stefan Behrens | ff023aa | 2012-11-06 11:43:11 +0100 | [diff] [blame] | 4032 | 		if (is_dev_replace) | 
| Qu Wenruo | 0339ef2 | 2014-02-28 10:46:17 +0800 | [diff] [blame] | 4033 | 			fs_info->scrub_workers = | 
| Jeff Mahoney | cb00109 | 2016-06-09 16:22:11 -0400 | [diff] [blame] | 4034 | 				btrfs_alloc_workqueue(fs_info, "scrub", flags, | 
| Qu Wenruo | 0339ef2 | 2014-02-28 10:46:17 +0800 | [diff] [blame] | 4035 | 						      1, 4); | 
| Stefan Behrens | ff023aa | 2012-11-06 11:43:11 +0100 | [diff] [blame] | 4036 | 		else | 
| Qu Wenruo | 0339ef2 | 2014-02-28 10:46:17 +0800 | [diff] [blame] | 4037 | 			fs_info->scrub_workers = | 
| Jeff Mahoney | cb00109 | 2016-06-09 16:22:11 -0400 | [diff] [blame] | 4038 | 				btrfs_alloc_workqueue(fs_info, "scrub", flags, | 
| Qu Wenruo | 0339ef2 | 2014-02-28 10:46:17 +0800 | [diff] [blame] | 4039 | 						      max_active, 4); | 
| Zhao Lei | e82afc5 | 2015-06-12 20:36:58 +0800 | [diff] [blame] | 4040 | 		if (!fs_info->scrub_workers) | 
 | 4041 | 			goto fail_scrub_workers; | 
 | 4042 |  | 
| Qu Wenruo | 0339ef2 | 2014-02-28 10:46:17 +0800 | [diff] [blame] | 4043 | 		fs_info->scrub_wr_completion_workers = | 
| Jeff Mahoney | cb00109 | 2016-06-09 16:22:11 -0400 | [diff] [blame] | 4044 | 			btrfs_alloc_workqueue(fs_info, "scrubwrc", flags, | 
| Qu Wenruo | 0339ef2 | 2014-02-28 10:46:17 +0800 | [diff] [blame] | 4045 | 					      max_active, 2); | 
| Zhao Lei | e82afc5 | 2015-06-12 20:36:58 +0800 | [diff] [blame] | 4046 | 		if (!fs_info->scrub_wr_completion_workers) | 
 | 4047 | 			goto fail_scrub_wr_completion_workers; | 
 | 4048 |  | 
| Qu Wenruo | 0339ef2 | 2014-02-28 10:46:17 +0800 | [diff] [blame] | 4049 | 		fs_info->scrub_nocow_workers = | 
| Jeff Mahoney | cb00109 | 2016-06-09 16:22:11 -0400 | [diff] [blame] | 4050 | 			btrfs_alloc_workqueue(fs_info, "scrubnc", flags, 1, 0); | 
| Zhao Lei | e82afc5 | 2015-06-12 20:36:58 +0800 | [diff] [blame] | 4051 | 		if (!fs_info->scrub_nocow_workers) | 
 | 4052 | 			goto fail_scrub_nocow_workers; | 
| Zhao Lei | 20b2e30 | 2015-06-04 20:09:15 +0800 | [diff] [blame] | 4053 | 		fs_info->scrub_parity_workers = | 
| Jeff Mahoney | cb00109 | 2016-06-09 16:22:11 -0400 | [diff] [blame] | 4054 | 			btrfs_alloc_workqueue(fs_info, "scrubparity", flags, | 
| Zhao Lei | 20b2e30 | 2015-06-04 20:09:15 +0800 | [diff] [blame] | 4055 | 					      max_active, 2); | 
| Zhao Lei | e82afc5 | 2015-06-12 20:36:58 +0800 | [diff] [blame] | 4056 | 		if (!fs_info->scrub_parity_workers) | 
 | 4057 | 			goto fail_scrub_parity_workers; | 
| Arne Jansen | 632dd77 | 2011-06-10 12:07:07 +0200 | [diff] [blame] | 4058 | 	} | 
| Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 4059 | 	++fs_info->scrub_workers_refcnt; | 
| Zhao Lei | e82afc5 | 2015-06-12 20:36:58 +0800 | [diff] [blame] | 4060 | 	return 0; | 
 | 4061 |  | 
 | 4062 | fail_scrub_parity_workers: | 
 | 4063 | 	btrfs_destroy_workqueue(fs_info->scrub_nocow_workers); | 
 | 4064 | fail_scrub_nocow_workers: | 
 | 4065 | 	btrfs_destroy_workqueue(fs_info->scrub_wr_completion_workers); | 
 | 4066 | fail_scrub_wr_completion_workers: | 
 | 4067 | 	btrfs_destroy_workqueue(fs_info->scrub_workers); | 
 | 4068 | fail_scrub_workers: | 
 | 4069 | 	return -ENOMEM; | 
| Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 4070 | } | 
 | 4071 |  | 
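Editorial note: scrub_workers_get() above uses the common kernel error-unwinding idiom: each allocation gets its own fail_* label and the labels run in reverse order, so a failure tears down only the workqueues that were already created. A generic userspace sketch of the same shape, with invented resource names:

#include <stdlib.h>

struct pool { int unused; };

static struct pool *pool_create(void) { return calloc(1, sizeof(struct pool)); }
static void pool_destroy(struct pool *p) { free(p); }

/* Allocate three pools; on failure undo only what already succeeded. */
static int workers_get(struct pool **a, struct pool **b, struct pool **c)
{
	*a = pool_create();
	if (!*a)
		goto fail_a;
	*b = pool_create();
	if (!*b)
		goto fail_b;
	*c = pool_create();
	if (!*c)
		goto fail_c;
	return 0;

fail_c:
	pool_destroy(*b);
fail_b:
	pool_destroy(*a);
fail_a:
	return -1;	/* the kernel version returns -ENOMEM here */
}

int main(void)
{
	struct pool *a, *b, *c;

	if (workers_get(&a, &b, &c) == 0) {
		pool_destroy(c);
		pool_destroy(b);
		pool_destroy(a);
	}
	return 0;
}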
| Stefan Behrens | aa1b8cd | 2012-11-05 17:03:39 +0100 | [diff] [blame] | 4072 | static noinline_for_stack void scrub_workers_put(struct btrfs_fs_info *fs_info) | 
| Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 4073 | { | 
| Stefan Behrens | ff023aa | 2012-11-06 11:43:11 +0100 | [diff] [blame] | 4074 | 	if (--fs_info->scrub_workers_refcnt == 0) { | 
| Qu Wenruo | 0339ef2 | 2014-02-28 10:46:17 +0800 | [diff] [blame] | 4075 | 		btrfs_destroy_workqueue(fs_info->scrub_workers); | 
 | 4076 | 		btrfs_destroy_workqueue(fs_info->scrub_wr_completion_workers); | 
 | 4077 | 		btrfs_destroy_workqueue(fs_info->scrub_nocow_workers); | 
| Zhao Lei | 20b2e30 | 2015-06-04 20:09:15 +0800 | [diff] [blame] | 4078 | 		btrfs_destroy_workqueue(fs_info->scrub_parity_workers); | 
| Stefan Behrens | ff023aa | 2012-11-06 11:43:11 +0100 | [diff] [blame] | 4079 | 	} | 
| Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 4080 | 	WARN_ON(fs_info->scrub_workers_refcnt < 0); | 
| Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 4081 | } | 
 | 4082 |  | 
| Stefan Behrens | aa1b8cd | 2012-11-05 17:03:39 +0100 | [diff] [blame] | 4083 | int btrfs_scrub_dev(struct btrfs_fs_info *fs_info, u64 devid, u64 start, | 
 | 4084 | 		    u64 end, struct btrfs_scrub_progress *progress, | 
| Stefan Behrens | 63a212a | 2012-11-05 18:29:28 +0100 | [diff] [blame] | 4085 | 		    int readonly, int is_dev_replace) | 
| Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 4086 | { | 
| Stefan Behrens | d9d181c | 2012-11-02 09:58:09 +0100 | [diff] [blame] | 4087 | 	struct scrub_ctx *sctx; | 
| Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 4088 | 	int ret; | 
 | 4089 | 	struct btrfs_device *dev; | 
| Miao Xie | 5d68da3 | 2014-07-24 11:37:07 +0800 | [diff] [blame] | 4090 | 	struct rcu_string *name; | 
| Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 4091 |  | 
| Stefan Behrens | aa1b8cd | 2012-11-05 17:03:39 +0100 | [diff] [blame] | 4092 | 	if (btrfs_fs_closing(fs_info)) | 
| Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 4093 | 		return -EINVAL; | 
 | 4094 |  | 
| Jeff Mahoney | da17066 | 2016-06-15 09:22:56 -0400 | [diff] [blame] | 4095 | 	if (fs_info->nodesize > BTRFS_STRIPE_LEN) { | 
| Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 4096 | 		/* | 
 | 4097 | 		 * In this case scrub is unable to calculate the checksum, | 
 | 4098 | 		 * given the way scrub is implemented. Do not handle this | 
 | 4099 | 		 * situation at all because it won't ever happen. | 
 | 4100 | 		 */ | 
| Frank Holton | efe120a | 2013-12-20 11:37:06 -0500 | [diff] [blame] | 4101 | 		btrfs_err(fs_info, | 
 | 4102 | 			   "scrub: size assumption nodesize <= BTRFS_STRIPE_LEN (%d <= %d) fails", | 
| Jeff Mahoney | da17066 | 2016-06-15 09:22:56 -0400 | [diff] [blame] | 4103 | 		       fs_info->nodesize, | 
 | 4104 | 		       BTRFS_STRIPE_LEN); | 
| Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 4105 | 		return -EINVAL; | 
 | 4106 | 	} | 
 | 4107 |  | 
| Jeff Mahoney | da17066 | 2016-06-15 09:22:56 -0400 | [diff] [blame] | 4108 | 	if (fs_info->sectorsize != PAGE_SIZE) { | 
| Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 4109 | 		/* not supported for data w/o checksums */ | 
| Chandan Rajendra | 751bebb | 2016-07-04 10:04:39 +0530 | [diff] [blame] | 4110 | 		btrfs_err_rl(fs_info, | 
| Jeff Mahoney | 5d163e0 | 2016-09-20 10:05:00 -0400 | [diff] [blame] | 4111 | 			   "scrub: size assumption sectorsize != PAGE_SIZE (%d != %lu) fails", | 
| Jeff Mahoney | da17066 | 2016-06-15 09:22:56 -0400 | [diff] [blame] | 4112 | 		       fs_info->sectorsize, PAGE_SIZE); | 
| Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 4113 | 		return -EINVAL; | 
 | 4114 | 	} | 
 | 4115 |  | 
| Jeff Mahoney | da17066 | 2016-06-15 09:22:56 -0400 | [diff] [blame] | 4116 | 	if (fs_info->nodesize > | 
| Stefan Behrens | 7a9e998 | 2012-11-02 14:58:04 +0100 | [diff] [blame] | 4117 | 	    PAGE_SIZE * SCRUB_MAX_PAGES_PER_BLOCK || | 
| Jeff Mahoney | da17066 | 2016-06-15 09:22:56 -0400 | [diff] [blame] | 4118 | 	    fs_info->sectorsize > PAGE_SIZE * SCRUB_MAX_PAGES_PER_BLOCK) { | 
| Stefan Behrens | 7a9e998 | 2012-11-02 14:58:04 +0100 | [diff] [blame] | 4119 | 		/* | 
 | 4120 | 		 * would exhaust the array bounds of the pagev member in | 
 | 4121 | 		 * struct scrub_block | 
 | 4122 | 		 */ | 
| Jeff Mahoney | 5d163e0 | 2016-09-20 10:05:00 -0400 | [diff] [blame] | 4123 | 		btrfs_err(fs_info, | 
 | 4124 | 			  "scrub: size assumption nodesize and sectorsize <= SCRUB_MAX_PAGES_PER_BLOCK (%d <= %d && %d <= %d) fails", | 
| Jeff Mahoney | da17066 | 2016-06-15 09:22:56 -0400 | [diff] [blame] | 4125 | 		       fs_info->nodesize, | 
| Stefan Behrens | 7a9e998 | 2012-11-02 14:58:04 +0100 | [diff] [blame] | 4126 | 		       SCRUB_MAX_PAGES_PER_BLOCK, | 
| Jeff Mahoney | da17066 | 2016-06-15 09:22:56 -0400 | [diff] [blame] | 4127 | 		       fs_info->sectorsize, | 
| Stefan Behrens | 7a9e998 | 2012-11-02 14:58:04 +0100 | [diff] [blame] | 4128 | 		       SCRUB_MAX_PAGES_PER_BLOCK); | 
 | 4129 | 		return -EINVAL; | 
 | 4130 | 	} | 
 | 4131 |  | 
| Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 4132 |  | 
| Stefan Behrens | aa1b8cd | 2012-11-05 17:03:39 +0100 | [diff] [blame] | 4133 | 	mutex_lock(&fs_info->fs_devices->device_list_mutex); | 
 | 4134 | 	dev = btrfs_find_device(fs_info, devid, NULL, NULL); | 
| Stefan Behrens | 63a212a | 2012-11-05 18:29:28 +0100 | [diff] [blame] | 4135 | 	if (!dev || (dev->missing && !is_dev_replace)) { | 
| Stefan Behrens | aa1b8cd | 2012-11-05 17:03:39 +0100 | [diff] [blame] | 4136 | 		mutex_unlock(&fs_info->fs_devices->device_list_mutex); | 
| Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 4137 | 		return -ENODEV; | 
 | 4138 | 	} | 
| Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 4139 |  | 
| Miao Xie | 5d68da3 | 2014-07-24 11:37:07 +0800 | [diff] [blame] | 4140 | 	if (!is_dev_replace && !readonly && !dev->writeable) { | 
 | 4141 | 		mutex_unlock(&fs_info->fs_devices->device_list_mutex); | 
 | 4142 | 		rcu_read_lock(); | 
 | 4143 | 		name = rcu_dereference(dev->name); | 
 | 4144 | 		btrfs_err(fs_info, "scrub: device %s is not writable", | 
 | 4145 | 			  name->str); | 
 | 4146 | 		rcu_read_unlock(); | 
 | 4147 | 		return -EROFS; | 
 | 4148 | 	} | 
 | 4149 |  | 
| Wang Shilong | 3b7a016 | 2013-10-12 02:11:12 +0800 | [diff] [blame] | 4150 | 	mutex_lock(&fs_info->scrub_lock); | 
| Stefan Behrens | 63a212a | 2012-11-05 18:29:28 +0100 | [diff] [blame] | 4151 | 	if (!dev->in_fs_metadata || dev->is_tgtdev_for_dev_replace) { | 
| Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 4152 | 		mutex_unlock(&fs_info->scrub_lock); | 
| Stefan Behrens | aa1b8cd | 2012-11-05 17:03:39 +0100 | [diff] [blame] | 4153 | 		mutex_unlock(&fs_info->fs_devices->device_list_mutex); | 
| Stefan Behrens | aa1b8cd | 2012-11-05 17:03:39 +0100 | [diff] [blame] | 4154 | 		return -EIO; | 
| Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 4155 | 	} | 
 | 4156 |  | 
| Liu Bo | 73beece | 2015-07-17 16:49:19 +0800 | [diff] [blame] | 4157 | 	btrfs_dev_replace_lock(&fs_info->dev_replace, 0); | 
| Stefan Behrens | 8dabb74 | 2012-11-06 13:15:27 +0100 | [diff] [blame] | 4158 | 	if (dev->scrub_device || | 
 | 4159 | 	    (!is_dev_replace && | 
 | 4160 | 	     btrfs_dev_replace_is_ongoing(&fs_info->dev_replace))) { | 
| Liu Bo | 73beece | 2015-07-17 16:49:19 +0800 | [diff] [blame] | 4161 | 		btrfs_dev_replace_unlock(&fs_info->dev_replace, 0); | 
| Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 4162 | 		mutex_unlock(&fs_info->scrub_lock); | 
| Stefan Behrens | aa1b8cd | 2012-11-05 17:03:39 +0100 | [diff] [blame] | 4163 | 		mutex_unlock(&fs_info->fs_devices->device_list_mutex); | 
| Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 4164 | 		return -EINPROGRESS; | 
 | 4165 | 	} | 
| Liu Bo | 73beece | 2015-07-17 16:49:19 +0800 | [diff] [blame] | 4166 | 	btrfs_dev_replace_unlock(&fs_info->dev_replace, 0); | 
| Wang Shilong | 3b7a016 | 2013-10-12 02:11:12 +0800 | [diff] [blame] | 4167 |  | 
 | 4168 | 	ret = scrub_workers_get(fs_info, is_dev_replace); | 
 | 4169 | 	if (ret) { | 
 | 4170 | 		mutex_unlock(&fs_info->scrub_lock); | 
 | 4171 | 		mutex_unlock(&fs_info->fs_devices->device_list_mutex); | 
 | 4172 | 		return ret; | 
 | 4173 | 	} | 
 | 4174 |  | 
| Stefan Behrens | 63a212a | 2012-11-05 18:29:28 +0100 | [diff] [blame] | 4175 | 	sctx = scrub_setup_ctx(dev, is_dev_replace); | 
| Stefan Behrens | d9d181c | 2012-11-02 09:58:09 +0100 | [diff] [blame] | 4176 | 	if (IS_ERR(sctx)) { | 
| Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 4177 | 		mutex_unlock(&fs_info->scrub_lock); | 
| Stefan Behrens | aa1b8cd | 2012-11-05 17:03:39 +0100 | [diff] [blame] | 4178 | 		mutex_unlock(&fs_info->fs_devices->device_list_mutex); | 
 | 4179 | 		scrub_workers_put(fs_info); | 
| Stefan Behrens | d9d181c | 2012-11-02 09:58:09 +0100 | [diff] [blame] | 4180 | 		return PTR_ERR(sctx); | 
| Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 4181 | 	} | 
| Stefan Behrens | d9d181c | 2012-11-02 09:58:09 +0100 | [diff] [blame] | 4182 | 	sctx->readonly = readonly; | 
 | 4183 | 	dev->scrub_device = sctx; | 
| Wang Shilong | 3cb0929 | 2013-12-04 21:15:19 +0800 | [diff] [blame] | 4184 | 	mutex_unlock(&fs_info->fs_devices->device_list_mutex); | 
| Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 4185 |  | 
| Wang Shilong | 3cb0929 | 2013-12-04 21:15:19 +0800 | [diff] [blame] | 4186 | 	/* | 
 | 4187 | 	 * By checking @scrub_pause_req here, we can avoid a | 
 | 4188 | 	 * race between committing a transaction and scrubbing. | 
 | 4189 | 	 */ | 
| Wang Shilong | cb7ab02 | 2013-12-04 21:16:53 +0800 | [diff] [blame] | 4190 | 	__scrub_blocked_if_needed(fs_info); | 
| Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 4191 | 	atomic_inc(&fs_info->scrubs_running); | 
 | 4192 | 	mutex_unlock(&fs_info->scrub_lock); | 
| Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 4193 |  | 
| Stefan Behrens | ff023aa | 2012-11-06 11:43:11 +0100 | [diff] [blame] | 4194 | 	if (!is_dev_replace) { | 
| Wang Shilong | 9b011ad | 2013-10-25 19:12:02 +0800 | [diff] [blame] | 4195 | 		/* | 
 | 4196 | 		 * By holding the device list mutex, we can | 
 | 4197 | 		 * kick off writing the super block in log tree sync. | 
 | 4198 | 		 */ | 
| Wang Shilong | 3cb0929 | 2013-12-04 21:15:19 +0800 | [diff] [blame] | 4199 | 		mutex_lock(&fs_info->fs_devices->device_list_mutex); | 
| Stefan Behrens | ff023aa | 2012-11-06 11:43:11 +0100 | [diff] [blame] | 4200 | 		ret = scrub_supers(sctx, dev); | 
| Wang Shilong | 3cb0929 | 2013-12-04 21:15:19 +0800 | [diff] [blame] | 4201 | 		mutex_unlock(&fs_info->fs_devices->device_list_mutex); | 
| Stefan Behrens | ff023aa | 2012-11-06 11:43:11 +0100 | [diff] [blame] | 4202 | 	} | 
| Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 4203 |  | 
 | 4204 | 	if (!ret) | 
| Stefan Behrens | ff023aa | 2012-11-06 11:43:11 +0100 | [diff] [blame] | 4205 | 		ret = scrub_enumerate_chunks(sctx, dev, start, end, | 
 | 4206 | 					     is_dev_replace); | 
| Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 4207 |  | 
| Stefan Behrens | b6bfebc | 2012-11-02 16:44:58 +0100 | [diff] [blame] | 4208 | 	wait_event(sctx->list_wait, atomic_read(&sctx->bios_in_flight) == 0); | 
| Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 4209 | 	atomic_dec(&fs_info->scrubs_running); | 
 | 4210 | 	wake_up(&fs_info->scrub_pause_wait); | 
 | 4211 |  | 
| Stefan Behrens | b6bfebc | 2012-11-02 16:44:58 +0100 | [diff] [blame] | 4212 | 	wait_event(sctx->list_wait, atomic_read(&sctx->workers_pending) == 0); | 
| Jan Schmidt | 0ef8e45 | 2011-06-13 20:04:15 +0200 | [diff] [blame] | 4213 |  | 
| Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 4214 | 	if (progress) | 
| Stefan Behrens | d9d181c | 2012-11-02 09:58:09 +0100 | [diff] [blame] | 4215 | 		memcpy(progress, &sctx->stat, sizeof(*progress)); | 
| Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 4216 |  | 
 | 4217 | 	mutex_lock(&fs_info->scrub_lock); | 
 | 4218 | 	dev->scrub_device = NULL; | 
| Wang Shilong | 3b7a016 | 2013-10-12 02:11:12 +0800 | [diff] [blame] | 4219 | 	scrub_workers_put(fs_info); | 
| Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 4220 | 	mutex_unlock(&fs_info->scrub_lock); | 
 | 4221 |  | 
| Filipe Manana | f55985f | 2015-02-09 21:14:24 +0000 | [diff] [blame] | 4222 | 	scrub_put_ctx(sctx); | 
| Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 4223 |  | 
 | 4224 | 	return ret; | 
 | 4225 | } | 
 | 4226 |  | 
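Editorial note: btrfs_scrub_dev() above is driven from user space through the scrub ioctls. A minimal userspace sketch of starting a foreground scrub and reading back the resulting statistics, assuming the uapi definitions in <linux/btrfs.h> (struct btrfs_ioctl_scrub_args, struct btrfs_scrub_progress, BTRFS_IOC_SCRUB); the mount point and device id are placeholders, and error handling is kept to the bare minimum:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/btrfs.h>

int main(void)
{
	struct btrfs_ioctl_scrub_args args;
	int fd = open("/mnt/btrfs", O_RDONLY);	/* any file/dir on the fs */

	if (fd < 0)
		return 1;

	memset(&args, 0, sizeof(args));
	args.devid = 1;			/* device to scrub */
	args.start = 0;
	args.end = (__u64)-1;		/* whole device */
	args.flags = 0;			/* or BTRFS_SCRUB_READONLY */

	/* Blocks until the scrub finishes, fails or is cancelled. */
	if (ioctl(fd, BTRFS_IOC_SCRUB, &args) < 0)
		perror("BTRFS_IOC_SCRUB");
	else
		printf("csum errors: %llu, corrected: %llu\n",
		       (unsigned long long)args.progress.csum_errors,
		       (unsigned long long)args.progress.corrected_errors);

	close(fd);
	return 0;
}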
| Jeff Mahoney | 2ff7e61 | 2016-06-22 18:54:24 -0400 | [diff] [blame] | 4227 | void btrfs_scrub_pause(struct btrfs_fs_info *fs_info) | 
| Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 4228 | { | 
| Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 4229 | 	mutex_lock(&fs_info->scrub_lock); | 
 | 4230 | 	atomic_inc(&fs_info->scrub_pause_req); | 
 | 4231 | 	while (atomic_read(&fs_info->scrubs_paused) != | 
 | 4232 | 	       atomic_read(&fs_info->scrubs_running)) { | 
 | 4233 | 		mutex_unlock(&fs_info->scrub_lock); | 
 | 4234 | 		wait_event(fs_info->scrub_pause_wait, | 
 | 4235 | 			   atomic_read(&fs_info->scrubs_paused) == | 
 | 4236 | 			   atomic_read(&fs_info->scrubs_running)); | 
 | 4237 | 		mutex_lock(&fs_info->scrub_lock); | 
 | 4238 | 	} | 
 | 4239 | 	mutex_unlock(&fs_info->scrub_lock); | 
| Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 4240 | } | 
 | 4241 |  | 
| Jeff Mahoney | 2ff7e61 | 2016-06-22 18:54:24 -0400 | [diff] [blame] | 4242 | void btrfs_scrub_continue(struct btrfs_fs_info *fs_info) | 
| Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 4243 | { | 
| Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 4244 | 	atomic_dec(&fs_info->scrub_pause_req); | 
 | 4245 | 	wake_up(&fs_info->scrub_pause_wait); | 
| Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 4246 | } | 
 | 4247 |  | 
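Editorial note: btrfs_scrub_pause() and btrfs_scrub_continue() above form a simple barrier with the scrub workers: the caller raises scrub_pause_req and waits until every running scrub has parked itself (scrubs_paused == scrubs_running), and dropping the request wakes the scrubs again. A condition-variable sketch of the same handshake in userspace pthreads; the names and the worker-side helper mirror the kernel counters but are invented for the example, and this is not the kernel wait_event machinery:

#include <pthread.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
static int pause_req, running, paused;

/* Committer side: mirrors btrfs_scrub_pause()/btrfs_scrub_continue(). */
static void pause_scrubs(void)
{
	pthread_mutex_lock(&lock);
	pause_req++;
	while (paused != running)
		pthread_cond_wait(&cond, &lock);
	pthread_mutex_unlock(&lock);
}

static void continue_scrubs(void)
{
	pthread_mutex_lock(&lock);
	pause_req--;
	pthread_cond_broadcast(&cond);
	pthread_mutex_unlock(&lock);
}

/* Worker side: called at safe points, like __scrub_blocked_if_needed(). */
static void block_if_pause_requested(void)
{
	pthread_mutex_lock(&lock);
	if (pause_req) {
		paused++;
		pthread_cond_broadcast(&cond);	/* let the pauser proceed */
		while (pause_req)
			pthread_cond_wait(&cond, &lock);
		paused--;
	}
	pthread_mutex_unlock(&lock);
}

static void *scrub_worker(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&lock);
	running++;
	pthread_mutex_unlock(&lock);

	for (int step = 0; step < 1000; step++)
		block_if_pause_requested();	/* safe point between stripes */

	pthread_mutex_lock(&lock);
	running--;
	pthread_cond_broadcast(&cond);	/* a finished scrub also unblocks the pauser */
	pthread_mutex_unlock(&lock);
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, scrub_worker, NULL);
	pause_scrubs();		/* returns once the worker parks or finishes */
	continue_scrubs();
	pthread_join(t, NULL);
	return 0;
}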
| Stefan Behrens | aa1b8cd | 2012-11-05 17:03:39 +0100 | [diff] [blame] | 4248 | int btrfs_scrub_cancel(struct btrfs_fs_info *fs_info) | 
| Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 4249 | { | 
| Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 4250 | 	mutex_lock(&fs_info->scrub_lock); | 
 | 4251 | 	if (!atomic_read(&fs_info->scrubs_running)) { | 
 | 4252 | 		mutex_unlock(&fs_info->scrub_lock); | 
 | 4253 | 		return -ENOTCONN; | 
 | 4254 | 	} | 
 | 4255 |  | 
 | 4256 | 	atomic_inc(&fs_info->scrub_cancel_req); | 
 | 4257 | 	while (atomic_read(&fs_info->scrubs_running)) { | 
 | 4258 | 		mutex_unlock(&fs_info->scrub_lock); | 
 | 4259 | 		wait_event(fs_info->scrub_pause_wait, | 
 | 4260 | 			   atomic_read(&fs_info->scrubs_running) == 0); | 
 | 4261 | 		mutex_lock(&fs_info->scrub_lock); | 
 | 4262 | 	} | 
 | 4263 | 	atomic_dec(&fs_info->scrub_cancel_req); | 
 | 4264 | 	mutex_unlock(&fs_info->scrub_lock); | 
 | 4265 |  | 
 | 4266 | 	return 0; | 
 | 4267 | } | 
 | 4268 |  | 
| Stefan Behrens | aa1b8cd | 2012-11-05 17:03:39 +0100 | [diff] [blame] | 4269 | int btrfs_scrub_cancel_dev(struct btrfs_fs_info *fs_info, | 
 | 4270 | 			   struct btrfs_device *dev) | 
| Jeff Mahoney | 49b25e0 | 2012-03-01 17:24:58 +0100 | [diff] [blame] | 4271 | { | 
| Stefan Behrens | d9d181c | 2012-11-02 09:58:09 +0100 | [diff] [blame] | 4272 | 	struct scrub_ctx *sctx; | 
| Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 4273 |  | 
 | 4274 | 	mutex_lock(&fs_info->scrub_lock); | 
| Stefan Behrens | d9d181c | 2012-11-02 09:58:09 +0100 | [diff] [blame] | 4275 | 	sctx = dev->scrub_device; | 
 | 4276 | 	if (!sctx) { | 
| Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 4277 | 		mutex_unlock(&fs_info->scrub_lock); | 
 | 4278 | 		return -ENOTCONN; | 
 | 4279 | 	} | 
| Stefan Behrens | d9d181c | 2012-11-02 09:58:09 +0100 | [diff] [blame] | 4280 | 	atomic_inc(&sctx->cancel_req); | 
| Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 4281 | 	while (dev->scrub_device) { | 
 | 4282 | 		mutex_unlock(&fs_info->scrub_lock); | 
 | 4283 | 		wait_event(fs_info->scrub_pause_wait, | 
 | 4284 | 			   dev->scrub_device == NULL); | 
 | 4285 | 		mutex_lock(&fs_info->scrub_lock); | 
 | 4286 | 	} | 
 | 4287 | 	mutex_unlock(&fs_info->scrub_lock); | 
 | 4288 |  | 
 | 4289 | 	return 0; | 
 | 4290 | } | 
| Stefan Behrens | 1623ede | 2012-03-27 14:21:26 -0400 | [diff] [blame] | 4291 |  | 
| Jeff Mahoney | 2ff7e61 | 2016-06-22 18:54:24 -0400 | [diff] [blame] | 4292 | int btrfs_scrub_progress(struct btrfs_fs_info *fs_info, u64 devid, | 
| Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 4293 | 			 struct btrfs_scrub_progress *progress) | 
 | 4294 | { | 
 | 4295 | 	struct btrfs_device *dev; | 
| Stefan Behrens | d9d181c | 2012-11-02 09:58:09 +0100 | [diff] [blame] | 4296 | 	struct scrub_ctx *sctx = NULL; | 
| Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 4297 |  | 
| Jeff Mahoney | 0b246af | 2016-06-22 18:54:23 -0400 | [diff] [blame] | 4298 | 	mutex_lock(&fs_info->fs_devices->device_list_mutex); | 
 | 4299 | 	dev = btrfs_find_device(fs_info, devid, NULL, NULL); | 
| Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 4300 | 	if (dev) | 
| Stefan Behrens | d9d181c | 2012-11-02 09:58:09 +0100 | [diff] [blame] | 4301 | 		sctx = dev->scrub_device; | 
 | 4302 | 	if (sctx) | 
 | 4303 | 		memcpy(progress, &sctx->stat, sizeof(*progress)); | 
| Jeff Mahoney | 0b246af | 2016-06-22 18:54:23 -0400 | [diff] [blame] | 4304 | 	mutex_unlock(&fs_info->fs_devices->device_list_mutex); | 
| Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 4305 |  | 
| Stefan Behrens | d9d181c | 2012-11-02 09:58:09 +0100 | [diff] [blame] | 4306 | 	return dev ? (sctx ? 0 : -ENOTCONN) : -ENODEV; | 
| Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 4307 | } | 
| Stefan Behrens | ff023aa | 2012-11-06 11:43:11 +0100 | [diff] [blame] | 4308 |  | 
 | 4309 | static void scrub_remap_extent(struct btrfs_fs_info *fs_info, | 
 | 4310 | 			       u64 extent_logical, u64 extent_len, | 
 | 4311 | 			       u64 *extent_physical, | 
 | 4312 | 			       struct btrfs_device **extent_dev, | 
 | 4313 | 			       int *extent_mirror_num) | 
 | 4314 | { | 
 | 4315 | 	u64 mapped_length; | 
 | 4316 | 	struct btrfs_bio *bbio = NULL; | 
 | 4317 | 	int ret; | 
 | 4318 |  | 
 | 4319 | 	mapped_length = extent_len; | 
| Christoph Hellwig | cf8cddd | 2016-10-27 09:27:36 +0200 | [diff] [blame] | 4320 | 	ret = btrfs_map_block(fs_info, BTRFS_MAP_READ, extent_logical, | 
| Stefan Behrens | ff023aa | 2012-11-06 11:43:11 +0100 | [diff] [blame] | 4321 | 			      &mapped_length, &bbio, 0); | 
 | 4322 | 	if (ret || !bbio || mapped_length < extent_len || | 
 | 4323 | 	    !bbio->stripes[0].dev->bdev) { | 
| Zhao Lei | 6e9606d | 2015-01-20 15:11:34 +0800 | [diff] [blame] | 4324 | 		btrfs_put_bbio(bbio); | 
| Stefan Behrens | ff023aa | 2012-11-06 11:43:11 +0100 | [diff] [blame] | 4325 | 		return; | 
 | 4326 | 	} | 
 | 4327 |  | 
 | 4328 | 	*extent_physical = bbio->stripes[0].physical; | 
 | 4329 | 	*extent_mirror_num = bbio->mirror_num; | 
 | 4330 | 	*extent_dev = bbio->stripes[0].dev; | 
| Zhao Lei | 6e9606d | 2015-01-20 15:11:34 +0800 | [diff] [blame] | 4331 | 	btrfs_put_bbio(bbio); | 
| Stefan Behrens | ff023aa | 2012-11-06 11:43:11 +0100 | [diff] [blame] | 4332 | } | 
 | 4333 |  | 
| Stefan Behrens | ff023aa | 2012-11-06 11:43:11 +0100 | [diff] [blame] | 4334 | static int copy_nocow_pages(struct scrub_ctx *sctx, u64 logical, u64 len, | 
 | 4335 | 			    int mirror_num, u64 physical_for_dev_replace) | 
 | 4336 | { | 
 | 4337 | 	struct scrub_copy_nocow_ctx *nocow_ctx; | 
| Jeff Mahoney | fb45625 | 2016-06-22 18:54:56 -0400 | [diff] [blame] | 4338 | 	struct btrfs_fs_info *fs_info = sctx->fs_info; | 
| Stefan Behrens | ff023aa | 2012-11-06 11:43:11 +0100 | [diff] [blame] | 4339 |  | 
 | 4340 | 	nocow_ctx = kzalloc(sizeof(*nocow_ctx), GFP_NOFS); | 
 | 4341 | 	if (!nocow_ctx) { | 
 | 4342 | 		spin_lock(&sctx->stat_lock); | 
 | 4343 | 		sctx->stat.malloc_errors++; | 
 | 4344 | 		spin_unlock(&sctx->stat_lock); | 
 | 4345 | 		return -ENOMEM; | 
 | 4346 | 	} | 
 | 4347 |  | 
 | 4348 | 	scrub_pending_trans_workers_inc(sctx); | 
 | 4349 |  | 
 | 4350 | 	nocow_ctx->sctx = sctx; | 
 | 4351 | 	nocow_ctx->logical = logical; | 
 | 4352 | 	nocow_ctx->len = len; | 
 | 4353 | 	nocow_ctx->mirror_num = mirror_num; | 
 | 4354 | 	nocow_ctx->physical_for_dev_replace = physical_for_dev_replace; | 
| Liu Bo | 9e0af23 | 2014-08-15 23:36:53 +0800 | [diff] [blame] | 4355 | 	btrfs_init_work(&nocow_ctx->work, btrfs_scrubnc_helper, | 
 | 4356 | 			copy_nocow_pages_worker, NULL, NULL); | 
| Josef Bacik | 652f25a | 2013-09-12 16:58:28 -0400 | [diff] [blame] | 4357 | 	INIT_LIST_HEAD(&nocow_ctx->inodes); | 
| Qu Wenruo | 0339ef2 | 2014-02-28 10:46:17 +0800 | [diff] [blame] | 4358 | 	btrfs_queue_work(fs_info->scrub_nocow_workers, | 
 | 4359 | 			 &nocow_ctx->work); | 
| Stefan Behrens | ff023aa | 2012-11-06 11:43:11 +0100 | [diff] [blame] | 4360 |  | 
 | 4361 | 	return 0; | 
 | 4362 | } | 
 | 4363 |  | 
| Josef Bacik | 652f25a | 2013-09-12 16:58:28 -0400 | [diff] [blame] | 4364 | static int record_inode_for_nocow(u64 inum, u64 offset, u64 root, void *ctx) | 
 | 4365 | { | 
 | 4366 | 	struct scrub_copy_nocow_ctx *nocow_ctx = ctx; | 
 | 4367 | 	struct scrub_nocow_inode *nocow_inode; | 
 | 4368 |  | 
 | 4369 | 	nocow_inode = kzalloc(sizeof(*nocow_inode), GFP_NOFS); | 
 | 4370 | 	if (!nocow_inode) | 
 | 4371 | 		return -ENOMEM; | 
 | 4372 | 	nocow_inode->inum = inum; | 
 | 4373 | 	nocow_inode->offset = offset; | 
 | 4374 | 	nocow_inode->root = root; | 
 | 4375 | 	list_add_tail(&nocow_inode->list, &nocow_ctx->inodes); | 
 | 4376 | 	return 0; | 
 | 4377 | } | 
 | 4378 |  | 
 | 4379 | #define COPY_COMPLETE 1 | 
 | 4380 |  | 
| Stefan Behrens | ff023aa | 2012-11-06 11:43:11 +0100 | [diff] [blame] | 4381 | static void copy_nocow_pages_worker(struct btrfs_work *work) | 
 | 4382 | { | 
 | 4383 | 	struct scrub_copy_nocow_ctx *nocow_ctx = | 
 | 4384 | 		container_of(work, struct scrub_copy_nocow_ctx, work); | 
 | 4385 | 	struct scrub_ctx *sctx = nocow_ctx->sctx; | 
| Jeff Mahoney | 0b246af | 2016-06-22 18:54:23 -0400 | [diff] [blame] | 4386 | 	struct btrfs_fs_info *fs_info = sctx->fs_info; | 
 | 4387 | 	struct btrfs_root *root = fs_info->extent_root; | 
| Stefan Behrens | ff023aa | 2012-11-06 11:43:11 +0100 | [diff] [blame] | 4388 | 	u64 logical = nocow_ctx->logical; | 
 | 4389 | 	u64 len = nocow_ctx->len; | 
 | 4390 | 	int mirror_num = nocow_ctx->mirror_num; | 
 | 4391 | 	u64 physical_for_dev_replace = nocow_ctx->physical_for_dev_replace; | 
 | 4392 | 	int ret; | 
 | 4393 | 	struct btrfs_trans_handle *trans = NULL; | 
| Stefan Behrens | ff023aa | 2012-11-06 11:43:11 +0100 | [diff] [blame] | 4394 | 	struct btrfs_path *path; | 
| Stefan Behrens | ff023aa | 2012-11-06 11:43:11 +0100 | [diff] [blame] | 4395 | 	int not_written = 0; | 
 | 4396 |  | 
| Stefan Behrens | ff023aa | 2012-11-06 11:43:11 +0100 | [diff] [blame] | 4397 | 	path = btrfs_alloc_path(); | 
 | 4398 | 	if (!path) { | 
 | 4399 | 		spin_lock(&sctx->stat_lock); | 
 | 4400 | 		sctx->stat.malloc_errors++; | 
 | 4401 | 		spin_unlock(&sctx->stat_lock); | 
 | 4402 | 		not_written = 1; | 
 | 4403 | 		goto out; | 
 | 4404 | 	} | 
 | 4405 |  | 
 | 4406 | 	trans = btrfs_join_transaction(root); | 
 | 4407 | 	if (IS_ERR(trans)) { | 
 | 4408 | 		not_written = 1; | 
 | 4409 | 		goto out; | 
 | 4410 | 	} | 
 | 4411 |  | 
 | 4412 | 	ret = iterate_inodes_from_logical(logical, fs_info, path, | 
| Josef Bacik | 652f25a | 2013-09-12 16:58:28 -0400 | [diff] [blame] | 4413 | 					  record_inode_for_nocow, nocow_ctx); | 
| Stefan Behrens | ff023aa | 2012-11-06 11:43:11 +0100 | [diff] [blame] | 4414 | 	if (ret != 0 && ret != -ENOENT) { | 
| Jeff Mahoney | 5d163e0 | 2016-09-20 10:05:00 -0400 | [diff] [blame] | 4415 | 		btrfs_warn(fs_info, | 
 | 4416 | 			   "iterate_inodes_from_logical() failed: log %llu, phys %llu, len %llu, mir %u, ret %d", | 
 | 4417 | 			   logical, physical_for_dev_replace, len, mirror_num, | 
 | 4418 | 			   ret); | 
| Stefan Behrens | ff023aa | 2012-11-06 11:43:11 +0100 | [diff] [blame] | 4419 | 		not_written = 1; | 
 | 4420 | 		goto out; | 
 | 4421 | 	} | 
 | 4422 |  | 
| Jeff Mahoney | 3a45bb2 | 2016-09-09 21:39:03 -0400 | [diff] [blame] | 4423 | 	btrfs_end_transaction(trans); | 
| Josef Bacik | 652f25a | 2013-09-12 16:58:28 -0400 | [diff] [blame] | 4424 | 	trans = NULL; | 
 | 4425 | 	while (!list_empty(&nocow_ctx->inodes)) { | 
 | 4426 | 		struct scrub_nocow_inode *entry; | 
 | 4427 | 		entry = list_first_entry(&nocow_ctx->inodes, | 
 | 4428 | 					 struct scrub_nocow_inode, | 
 | 4429 | 					 list); | 
 | 4430 | 		list_del_init(&entry->list); | 
 | 4431 | 		ret = copy_nocow_pages_for_inode(entry->inum, entry->offset, | 
 | 4432 | 						 entry->root, nocow_ctx); | 
 | 4433 | 		kfree(entry); | 
 | 4434 | 		if (ret == COPY_COMPLETE) { | 
 | 4435 | 			ret = 0; | 
 | 4436 | 			break; | 
 | 4437 | 		} else if (ret) { | 
 | 4438 | 			break; | 
 | 4439 | 		} | 
 | 4440 | 	} | 
| Stefan Behrens | ff023aa | 2012-11-06 11:43:11 +0100 | [diff] [blame] | 4441 | out: | 
| Josef Bacik | 652f25a | 2013-09-12 16:58:28 -0400 | [diff] [blame] | 4442 | 	while (!list_empty(&nocow_ctx->inodes)) { | 
 | 4443 | 		struct scrub_nocow_inode *entry; | 
 | 4444 | 		entry = list_first_entry(&nocow_ctx->inodes, | 
 | 4445 | 					 struct scrub_nocow_inode, | 
 | 4446 | 					 list); | 
 | 4447 | 		list_del_init(&entry->list); | 
 | 4448 | 		kfree(entry); | 
 | 4449 | 	} | 
| Stefan Behrens | ff023aa | 2012-11-06 11:43:11 +0100 | [diff] [blame] | 4450 | 	if (trans && !IS_ERR(trans)) | 
| Jeff Mahoney | 3a45bb2 | 2016-09-09 21:39:03 -0400 | [diff] [blame] | 4451 | 		btrfs_end_transaction(trans); | 
| Stefan Behrens | ff023aa | 2012-11-06 11:43:11 +0100 | [diff] [blame] | 4452 | 	if (not_written) | 
 | 4453 | 		btrfs_dev_replace_stats_inc(&fs_info->dev_replace. | 
 | 4454 | 					    num_uncorrectable_read_errors); | 
 | 4455 |  | 
 | 4456 | 	btrfs_free_path(path); | 
 | 4457 | 	kfree(nocow_ctx); | 
 | 4458 |  | 
 | 4459 | 	scrub_pending_trans_workers_dec(sctx); | 
 | 4460 | } | 
 | 4461 |  | 
| Nikolay Borisov | 1c8c9c5 | 2017-02-20 13:51:05 +0200 | [diff] [blame] | 4462 | static int check_extent_to_block(struct btrfs_inode *inode, u64 start, u64 len, | 
| Gui Hecheng | 3215924 | 2014-11-10 15:36:08 +0800 | [diff] [blame] | 4463 | 				 u64 logical) | 
 | 4464 | { | 
 | 4465 | 	struct extent_state *cached_state = NULL; | 
 | 4466 | 	struct btrfs_ordered_extent *ordered; | 
 | 4467 | 	struct extent_io_tree *io_tree; | 
 | 4468 | 	struct extent_map *em; | 
 | 4469 | 	u64 lockstart = start, lockend = start + len - 1; | 
 | 4470 | 	int ret = 0; | 
 | 4471 |  | 
| Nikolay Borisov | 1c8c9c5 | 2017-02-20 13:51:05 +0200 | [diff] [blame] | 4472 | 	io_tree = &inode->io_tree; | 
| Gui Hecheng | 3215924 | 2014-11-10 15:36:08 +0800 | [diff] [blame] | 4473 |  | 
| David Sterba | ff13db4 | 2015-12-03 14:30:40 +0100 | [diff] [blame] | 4474 | 	lock_extent_bits(io_tree, lockstart, lockend, &cached_state); | 
| Nikolay Borisov | 1c8c9c5 | 2017-02-20 13:51:05 +0200 | [diff] [blame] | 4475 | 	ordered = btrfs_lookup_ordered_range(inode, lockstart, len); | 
| Gui Hecheng | 3215924 | 2014-11-10 15:36:08 +0800 | [diff] [blame] | 4476 | 	if (ordered) { | 
 | 4477 | 		btrfs_put_ordered_extent(ordered); | 
 | 4478 | 		ret = 1; | 
 | 4479 | 		goto out_unlock; | 
 | 4480 | 	} | 
 | 4481 |  | 
 | 4482 | 	em = btrfs_get_extent(inode, NULL, 0, start, len, 0); | 
 | 4483 | 	if (IS_ERR(em)) { | 
 | 4484 | 		ret = PTR_ERR(em); | 
 | 4485 | 		goto out_unlock; | 
 | 4486 | 	} | 
 | 4487 |  | 
 | 4488 | 	/* | 
 | 4489 | 	 * This extent does not actually cover the logical extent anymore; | 
 | 4490 | 	 * move on to the next inode. | 
 | 4491 | 	 */ | 
 | 4492 | 	if (em->block_start > logical || | 
 | 4493 | 	    em->block_start + em->block_len < logical + len) { | 
 | 4494 | 		free_extent_map(em); | 
 | 4495 | 		ret = 1; | 
 | 4496 | 		goto out_unlock; | 
 | 4497 | 	} | 
 | 4498 | 	free_extent_map(em); | 
 | 4499 |  | 
 | 4500 | out_unlock: | 
 | 4501 | 	unlock_extent_cached(io_tree, lockstart, lockend, &cached_state, | 
 | 4502 | 			     GFP_NOFS); | 
 | 4503 | 	return ret; | 
 | 4504 | } | 
 | 4505 |  | 
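 |  | /* | 
 |  |  * Copy the data of one inode that references the nocow extent to the | 
 |  |  * dev-replace target, page by page.  Returns COPY_COMPLETE when the copy | 
 |  |  * loop finished, 0 when the extent no longer belongs to this inode, or a | 
 |  |  * negative errno when the root/inode lookup or page setup fails. | 
 |  |  */ | 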
| Josef Bacik | 652f25a | 2013-09-12 16:58:28 -0400 | [diff] [blame] | 4506 | static int copy_nocow_pages_for_inode(u64 inum, u64 offset, u64 root, | 
 | 4507 | 				      struct scrub_copy_nocow_ctx *nocow_ctx) | 
| Stefan Behrens | ff023aa | 2012-11-06 11:43:11 +0100 | [diff] [blame] | 4508 | { | 
| Jeff Mahoney | fb45625 | 2016-06-22 18:54:56 -0400 | [diff] [blame] | 4509 | 	struct btrfs_fs_info *fs_info = nocow_ctx->sctx->fs_info; | 
| Stefan Behrens | ff023aa | 2012-11-06 11:43:11 +0100 | [diff] [blame] | 4510 | 	struct btrfs_key key; | 
| Miao Xie | 826aa0a | 2013-06-27 18:50:59 +0800 | [diff] [blame] | 4511 | 	struct inode *inode; | 
 | 4512 | 	struct page *page; | 
| Stefan Behrens | ff023aa | 2012-11-06 11:43:11 +0100 | [diff] [blame] | 4513 | 	struct btrfs_root *local_root; | 
| Josef Bacik | 652f25a | 2013-09-12 16:58:28 -0400 | [diff] [blame] | 4514 | 	struct extent_io_tree *io_tree; | 
| Stefan Behrens | ff023aa | 2012-11-06 11:43:11 +0100 | [diff] [blame] | 4515 | 	u64 physical_for_dev_replace; | 
| Gui Hecheng | 3215924 | 2014-11-10 15:36:08 +0800 | [diff] [blame] | 4516 | 	u64 nocow_ctx_logical; | 
| Josef Bacik | 652f25a | 2013-09-12 16:58:28 -0400 | [diff] [blame] | 4517 | 	u64 len = nocow_ctx->len; | 
| Miao Xie | 826aa0a | 2013-06-27 18:50:59 +0800 | [diff] [blame] | 4518 | 	unsigned long index; | 
| Liu Bo | 6f1c360 | 2013-01-29 03:22:10 +0000 | [diff] [blame] | 4519 | 	int srcu_index; | 
| Josef Bacik | 652f25a | 2013-09-12 16:58:28 -0400 | [diff] [blame] | 4520 | 	int ret = 0; | 
 | 4521 | 	int err = 0; | 
| Stefan Behrens | ff023aa | 2012-11-06 11:43:11 +0100 | [diff] [blame] | 4522 |  | 
 | 4523 | 	key.objectid = root; | 
 | 4524 | 	key.type = BTRFS_ROOT_ITEM_KEY; | 
 | 4525 | 	key.offset = (u64)-1; | 
| Liu Bo | 6f1c360 | 2013-01-29 03:22:10 +0000 | [diff] [blame] | 4526 |  | 
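 |  | 	/* | 
 |  | 	 * Hold subvol_srcu across the root and inode lookup, presumably so | 
 |  | 	 * the subvolume cannot be dropped while it is being dereferenced. | 
 |  | 	 */ | 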
 | 4527 | 	srcu_index = srcu_read_lock(&fs_info->subvol_srcu); | 
 | 4528 |  | 
| Stefan Behrens | ff023aa | 2012-11-06 11:43:11 +0100 | [diff] [blame] | 4529 | 	local_root = btrfs_read_fs_root_no_name(fs_info, &key); | 
| Liu Bo | 6f1c360 | 2013-01-29 03:22:10 +0000 | [diff] [blame] | 4530 | 	if (IS_ERR(local_root)) { | 
 | 4531 | 		srcu_read_unlock(&fs_info->subvol_srcu, srcu_index); | 
| Stefan Behrens | ff023aa | 2012-11-06 11:43:11 +0100 | [diff] [blame] | 4532 | 		return PTR_ERR(local_root); | 
| Liu Bo | 6f1c360 | 2013-01-29 03:22:10 +0000 | [diff] [blame] | 4533 | 	} | 
| Stefan Behrens | ff023aa | 2012-11-06 11:43:11 +0100 | [diff] [blame] | 4534 |  | 
 | 4535 | 	key.type = BTRFS_INODE_ITEM_KEY; | 
 | 4536 | 	key.objectid = inum; | 
 | 4537 | 	key.offset = 0; | 
 | 4538 | 	inode = btrfs_iget(fs_info->sb, &key, local_root, NULL); | 
| Liu Bo | 6f1c360 | 2013-01-29 03:22:10 +0000 | [diff] [blame] | 4539 | 	srcu_read_unlock(&fs_info->subvol_srcu, srcu_index); | 
| Stefan Behrens | ff023aa | 2012-11-06 11:43:11 +0100 | [diff] [blame] | 4540 | 	if (IS_ERR(inode)) | 
 | 4541 | 		return PTR_ERR(inode); | 
 | 4542 |  | 
| Miao Xie | edd1400 | 2013-06-27 18:51:00 +0800 | [diff] [blame] | 4543 | 	/* Avoid racing with truncate/dio/punch hole. */ | 
| Al Viro | 5955102 | 2016-01-22 15:40:57 -0500 | [diff] [blame] | 4544 | 	inode_lock(inode); | 
| Miao Xie | edd1400 | 2013-06-27 18:51:00 +0800 | [diff] [blame] | 4545 | 	inode_dio_wait(inode); | 
 | 4546 |  | 
| Stefan Behrens | ff023aa | 2012-11-06 11:43:11 +0100 | [diff] [blame] | 4547 | 	physical_for_dev_replace = nocow_ctx->physical_for_dev_replace; | 
| Josef Bacik | 652f25a | 2013-09-12 16:58:28 -0400 | [diff] [blame] | 4548 | 	io_tree = &BTRFS_I(inode)->io_tree; | 
| Gui Hecheng | 3215924 | 2014-11-10 15:36:08 +0800 | [diff] [blame] | 4549 | 	nocow_ctx_logical = nocow_ctx->logical; | 
| Josef Bacik | 652f25a | 2013-09-12 16:58:28 -0400 | [diff] [blame] | 4550 |  | 
| Nikolay Borisov | 1c8c9c5 | 2017-02-20 13:51:05 +0200 | [diff] [blame] | 4551 | 	ret = check_extent_to_block(BTRFS_I(inode), offset, len, | 
 | 4552 | 			nocow_ctx_logical); | 
| Gui Hecheng | 3215924 | 2014-11-10 15:36:08 +0800 | [diff] [blame] | 4553 | 	if (ret) { | 
 | 4554 | 		ret = ret > 0 ? 0 : ret; | 
 | 4555 | 		goto out; | 
| Josef Bacik | 652f25a | 2013-09-12 16:58:28 -0400 | [diff] [blame] | 4556 | 	} | 
 | 4557 |  | 
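 |  | 	/* | 
 |  | 	 * Copy page by page: read each page through the page cache, re-check | 
 |  | 	 * that it still backs the logical block being repaired, and write it | 
 |  | 	 * to the replace target at the corresponding physical offset. | 
 |  | 	 */ | 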
| Kirill A. Shutemov | 09cbfea | 2016-04-01 15:29:47 +0300 | [diff] [blame] | 4558 | 	while (len >= PAGE_SIZE) { | 
 | 4559 | 		index = offset >> PAGE_SHIFT; | 
| Miao Xie | edd1400 | 2013-06-27 18:51:00 +0800 | [diff] [blame] | 4560 | again: | 
| Stefan Behrens | ff023aa | 2012-11-06 11:43:11 +0100 | [diff] [blame] | 4561 | 		page = find_or_create_page(inode->i_mapping, index, GFP_NOFS); | 
 | 4562 | 		if (!page) { | 
| Frank Holton | efe120a | 2013-12-20 11:37:06 -0500 | [diff] [blame] | 4563 | 			btrfs_err(fs_info, "find_or_create_page() failed"); | 
| Stefan Behrens | ff023aa | 2012-11-06 11:43:11 +0100 | [diff] [blame] | 4564 | 			ret = -ENOMEM; | 
| Miao Xie | 826aa0a | 2013-06-27 18:50:59 +0800 | [diff] [blame] | 4565 | 			goto out; | 
| Stefan Behrens | ff023aa | 2012-11-06 11:43:11 +0100 | [diff] [blame] | 4566 | 		} | 
 | 4567 |  | 
 | 4568 | 		if (PageUptodate(page)) { | 
 | 4569 | 			if (PageDirty(page)) | 
 | 4570 | 				goto next_page; | 
 | 4571 | 		} else { | 
 | 4572 | 			ClearPageError(page); | 
| Gui Hecheng | 3215924 | 2014-11-10 15:36:08 +0800 | [diff] [blame] | 4573 | 			err = extent_read_full_page(io_tree, page, | 
| Josef Bacik | 652f25a | 2013-09-12 16:58:28 -0400 | [diff] [blame] | 4574 | 							   btrfs_get_extent, | 
 | 4575 | 							   nocow_ctx->mirror_num); | 
| Miao Xie | 826aa0a | 2013-06-27 18:50:59 +0800 | [diff] [blame] | 4576 | 			if (err) { | 
 | 4577 | 				ret = err; | 
| Stefan Behrens | ff023aa | 2012-11-06 11:43:11 +0100 | [diff] [blame] | 4578 | 				goto next_page; | 
 | 4579 | 			} | 
| Miao Xie | edd1400 | 2013-06-27 18:51:00 +0800 | [diff] [blame] | 4580 |  | 
| Miao Xie | 26b25891 | 2013-06-27 18:50:58 +0800 | [diff] [blame] | 4581 | 			lock_page(page); | 
| Miao Xie | edd1400 | 2013-06-27 18:51:00 +0800 | [diff] [blame] | 4582 | 			/* | 
 | 4583 | 			 * If the page has been removed from the page cache, | 
 | 4584 | 			 * the data on it is meaningless: it may be stale, and | 
 | 4585 | 			 * the new data may have been written to a new page in | 
 | 4586 | 			 * the page cache. | 
 | 4587 | 			 */ | 
 | 4588 | 			if (page->mapping != inode->i_mapping) { | 
| Josef Bacik | 652f25a | 2013-09-12 16:58:28 -0400 | [diff] [blame] | 4589 | 				unlock_page(page); | 
| Kirill A. Shutemov | 09cbfea | 2016-04-01 15:29:47 +0300 | [diff] [blame] | 4590 | 				put_page(page); | 
| Miao Xie | edd1400 | 2013-06-27 18:51:00 +0800 | [diff] [blame] | 4591 | 				goto again; | 
 | 4592 | 			} | 
| Stefan Behrens | ff023aa | 2012-11-06 11:43:11 +0100 | [diff] [blame] | 4593 | 			if (!PageUptodate(page)) { | 
 | 4594 | 				ret = -EIO; | 
 | 4595 | 				goto next_page; | 
 | 4596 | 			} | 
 | 4597 | 		} | 
| Gui Hecheng | 3215924 | 2014-11-10 15:36:08 +0800 | [diff] [blame] | 4598 |  | 
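 |  | 		/* | 
 |  | 		 * Re-check after the page read: a concurrent write may have | 
 |  | 		 * relocated the extent, in which case this page is skipped. | 
 |  | 		 */ | 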
| Nikolay Borisov | 1c8c9c5 | 2017-02-20 13:51:05 +0200 | [diff] [blame] | 4599 | 		ret = check_extent_to_block(BTRFS_I(inode), offset, len, | 
| Gui Hecheng | 3215924 | 2014-11-10 15:36:08 +0800 | [diff] [blame] | 4600 | 					    nocow_ctx_logical); | 
 | 4601 | 		if (ret) { | 
 | 4602 | 			ret = ret > 0 ? 0 : ret; | 
 | 4603 | 			goto next_page; | 
 | 4604 | 		} | 
 | 4605 |  | 
| Miao Xie | 826aa0a | 2013-06-27 18:50:59 +0800 | [diff] [blame] | 4606 | 		err = write_page_nocow(nocow_ctx->sctx, | 
 | 4607 | 				       physical_for_dev_replace, page); | 
 | 4608 | 		if (err) | 
 | 4609 | 			ret = err; | 
| Stefan Behrens | ff023aa | 2012-11-06 11:43:11 +0100 | [diff] [blame] | 4610 | next_page: | 
| Miao Xie | 826aa0a | 2013-06-27 18:50:59 +0800 | [diff] [blame] | 4611 | 		unlock_page(page); | 
| Kirill A. Shutemov | 09cbfea | 2016-04-01 15:29:47 +0300 | [diff] [blame] | 4612 | 		put_page(page); | 
| Miao Xie | 826aa0a | 2013-06-27 18:50:59 +0800 | [diff] [blame] | 4613 |  | 
 | 4614 | 		if (ret) | 
 | 4615 | 			break; | 
 | 4616 |  | 
| Kirill A. Shutemov | 09cbfea | 2016-04-01 15:29:47 +0300 | [diff] [blame] | 4617 | 		offset += PAGE_SIZE; | 
 | 4618 | 		physical_for_dev_replace += PAGE_SIZE; | 
 | 4619 | 		nocow_ctx_logical += PAGE_SIZE; | 
 | 4620 | 		len -= PAGE_SIZE; | 
| Stefan Behrens | ff023aa | 2012-11-06 11:43:11 +0100 | [diff] [blame] | 4621 | 	} | 
| Josef Bacik | 652f25a | 2013-09-12 16:58:28 -0400 | [diff] [blame] | 4622 | 	ret = COPY_COMPLETE; | 
| Miao Xie | 826aa0a | 2013-06-27 18:50:59 +0800 | [diff] [blame] | 4623 | out: | 
| Al Viro | 5955102 | 2016-01-22 15:40:57 -0500 | [diff] [blame] | 4624 | 	inode_unlock(inode); | 
| Miao Xie | 826aa0a | 2013-06-27 18:50:59 +0800 | [diff] [blame] | 4625 | 	iput(inode); | 
| Stefan Behrens | ff023aa | 2012-11-06 11:43:11 +0100 | [diff] [blame] | 4626 | 	return ret; | 
 | 4627 | } | 
 | 4628 |  | 
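 |  | /* | 
 |  |  * Synchronously write one page to the dev-replace target device at | 
 |  |  * @physical_for_dev_replace.  Returns 0 on success, -ENOMEM if no bio can | 
 |  |  * be allocated, and -EIO if the target device is missing or the write | 
 |  |  * fails. | 
 |  |  */ | 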
 | 4629 | static int write_page_nocow(struct scrub_ctx *sctx, | 
 | 4630 | 			    u64 physical_for_dev_replace, struct page *page) | 
 | 4631 | { | 
 | 4632 | 	struct bio *bio; | 
 | 4633 | 	struct btrfs_device *dev; | 
 | 4634 | 	int ret; | 
| Stefan Behrens | ff023aa | 2012-11-06 11:43:11 +0100 | [diff] [blame] | 4635 |  | 
 | 4636 | 	dev = sctx->wr_ctx.tgtdev; | 
 | 4637 | 	if (!dev) | 
 | 4638 | 		return -EIO; | 
 | 4639 | 	if (!dev->bdev) { | 
| Jeff Mahoney | fb45625 | 2016-06-22 18:54:56 -0400 | [diff] [blame] | 4640 | 		btrfs_warn_rl(dev->fs_info, | 
| David Sterba | 9464732 | 2015-10-08 11:01:36 +0200 | [diff] [blame] | 4641 | 			"scrub write_page_nocow(bdev == NULL) is unexpected"); | 
| Stefan Behrens | ff023aa | 2012-11-06 11:43:11 +0100 | [diff] [blame] | 4642 | 		return -EIO; | 
 | 4643 | 	} | 
| Chris Mason | 9be3395 | 2013-05-17 18:30:14 -0400 | [diff] [blame] | 4644 | 	bio = btrfs_io_bio_alloc(GFP_NOFS, 1); | 
| Stefan Behrens | ff023aa | 2012-11-06 11:43:11 +0100 | [diff] [blame] | 4645 | 	if (!bio) { | 
 | 4646 | 		spin_lock(&sctx->stat_lock); | 
 | 4647 | 		sctx->stat.malloc_errors++; | 
 | 4648 | 		spin_unlock(&sctx->stat_lock); | 
 | 4649 | 		return -ENOMEM; | 
 | 4650 | 	} | 
| Kent Overstreet | 4f024f3 | 2013-10-11 15:44:27 -0700 | [diff] [blame] | 4651 | 	bio->bi_iter.bi_size = 0; | 
 | 4652 | 	bio->bi_iter.bi_sector = physical_for_dev_replace >> 9; | 
| Stefan Behrens | ff023aa | 2012-11-06 11:43:11 +0100 | [diff] [blame] | 4653 | 	bio->bi_bdev = dev->bdev; | 
| Christoph Hellwig | 70fd761 | 2016-11-01 07:40:10 -0600 | [diff] [blame] | 4654 | 	bio->bi_opf = REQ_OP_WRITE | REQ_SYNC; | 
| Kirill A. Shutemov | 09cbfea | 2016-04-01 15:29:47 +0300 | [diff] [blame] | 4655 | 	ret = bio_add_page(bio, page, PAGE_SIZE, 0); | 
 | 4656 | 	if (ret != PAGE_SIZE) { | 
| Stefan Behrens | ff023aa | 2012-11-06 11:43:11 +0100 | [diff] [blame] | 4657 | leave_with_eio: | 
 | 4658 | 		bio_put(bio); | 
 | 4659 | 		btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_WRITE_ERRS); | 
 | 4660 | 		return -EIO; | 
 | 4661 | 	} | 
| Stefan Behrens | ff023aa | 2012-11-06 11:43:11 +0100 | [diff] [blame] | 4662 |  | 
| Mike Christie | 4e49ea4 | 2016-06-05 14:31:41 -0500 | [diff] [blame] | 4663 | 	if (btrfsic_submit_bio_wait(bio)) | 
| Stefan Behrens | ff023aa | 2012-11-06 11:43:11 +0100 | [diff] [blame] | 4664 | 		goto leave_with_eio; | 
 | 4665 |  | 
 | 4666 | 	bio_put(bio); | 
 | 4667 | 	return 0; | 
 | 4668 | } |