blob: e2c7234931bc1fda713f8d12db1918f7efcc5b5c [file] [log] [blame]
Josef Bacik3407ef52011-03-24 13:54:24 +00001/*
2 * Copyright (C) 2003 Sistina Software (UK) Limited.
Mike Snitzera3998792011-08-02 12:32:06 +01003 * Copyright (C) 2004, 2010-2011 Red Hat, Inc. All rights reserved.
Josef Bacik3407ef52011-03-24 13:54:24 +00004 *
5 * This file is released under the GPL.
6 */
7
8#include <linux/device-mapper.h>
9
10#include <linux/module.h>
11#include <linux/init.h>
12#include <linux/blkdev.h>
13#include <linux/bio.h>
14#include <linux/slab.h>
15
16#define DM_MSG_PREFIX "flakey"
17
/*
 * A bio is eligible for corruption only if every flag bit set in
 * fc->corrupt_bio_flags is also set in the bio's bi_opf.
 */
#define all_corrupt_bio_flags_match(bio, fc) \
	(((bio)->bi_opf & (fc)->corrupt_bio_flags) == (fc)->corrupt_bio_flags)
/*
 * Flakey: Used for testing only, simulates intermittent,
 * catastrophic device failure.
 */
struct flakey_c {
	struct dm_dev *dev;		/* underlying device */
	unsigned long start_time;	/* jiffies when target was created; up/down cycling is measured from here */
	sector_t start;			/* sector offset into the underlying device */
	unsigned up_interval;		/* seconds per cycle the device behaves normally */
	unsigned down_interval;		/* seconds per cycle the device misbehaves */
	unsigned long flags;		/* bitmask of enum feature_flag_bits */
	unsigned corrupt_bio_byte;	/* 1-based byte to corrupt; 0 = corruption disabled */
	unsigned corrupt_bio_rw;	/* READ or WRITE: which direction to corrupt */
	unsigned corrupt_bio_value;	/* value written over the chosen byte */
	unsigned corrupt_bio_flags;	/* corrupt only bios whose bi_opf contains all these flags */
};
37
/* Optional behaviours selected by feature args; bit numbers for flakey_c.flags. */
enum feature_flag_bits {
	DROP_WRITES,	/* complete writes successfully without issuing them */
	ERROR_WRITES	/* fail writes with an I/O error */
};
42
/* Per-bio state: set in flakey_map(), read back in flakey_end_io(). */
struct per_bio_data {
	bool bio_submitted;	/* true if the bio was issued during a "down" interval */
};
46
/*
 * Parse the optional feature arguments following the four mandatory args:
 *   [drop_writes] | [error_writes]
 *   [corrupt_bio_byte <Nth_byte> <direction> <value> <bio_flags>]
 *
 * Fills in fc->flags and the corrupt_bio_* fields.
 * Returns 0 on success, or a negative errno with ti->error set.
 */
static int parse_features(struct dm_arg_set *as, struct flakey_c *fc,
			  struct dm_target *ti)
{
	int r;
	unsigned argc;
	const char *arg_name;

	static struct dm_arg _args[] = {
		{0, 6, "Invalid number of feature args"},
		{1, UINT_MAX, "Invalid corrupt bio byte"},
		{0, 255, "Invalid corrupt value to write into bio byte (0-255)"},
		{0, UINT_MAX, "Invalid corrupt bio flags mask"},
	};

	/* No feature arguments supplied. */
	if (!as->argc)
		return 0;

	r = dm_read_arg_group(_args, as, &argc, &ti->error);
	if (r)
		return r;

	while (argc) {
		arg_name = dm_shift_arg(as);
		argc--;

		/*
		 * drop_writes
		 * (mutually exclusive with error_writes; duplicates rejected)
		 */
		if (!strcasecmp(arg_name, "drop_writes")) {
			if (test_and_set_bit(DROP_WRITES, &fc->flags)) {
				ti->error = "Feature drop_writes duplicated";
				return -EINVAL;
			} else if (test_bit(ERROR_WRITES, &fc->flags)) {
				ti->error = "Feature drop_writes conflicts with feature error_writes";
				return -EINVAL;
			}

			continue;
		}

		/*
		 * error_writes
		 * (mutually exclusive with drop_writes; duplicates rejected)
		 */
		if (!strcasecmp(arg_name, "error_writes")) {
			if (test_and_set_bit(ERROR_WRITES, &fc->flags)) {
				ti->error = "Feature error_writes duplicated";
				return -EINVAL;

			} else if (test_bit(DROP_WRITES, &fc->flags)) {
				ti->error = "Feature error_writes conflicts with feature drop_writes";
				return -EINVAL;
			}

			continue;
		}

		/*
		 * corrupt_bio_byte <Nth_byte> <direction> <value> <bio_flags>
		 * Consumes four further arguments.
		 */
		if (!strcasecmp(arg_name, "corrupt_bio_byte")) {
			if (!argc) {
				ti->error = "Feature corrupt_bio_byte requires parameters";
				return -EINVAL;
			}

			r = dm_read_arg(_args + 1, as, &fc->corrupt_bio_byte, &ti->error);
			if (r)
				return r;
			argc--;

			/*
			 * Direction r or w?
			 */
			arg_name = dm_shift_arg(as);
			if (!strcasecmp(arg_name, "w"))
				fc->corrupt_bio_rw = WRITE;
			else if (!strcasecmp(arg_name, "r"))
				fc->corrupt_bio_rw = READ;
			else {
				ti->error = "Invalid corrupt bio direction (r or w)";
				return -EINVAL;
			}
			argc--;

			/*
			 * Value of byte (0-255) to write in place of correct one.
			 */
			r = dm_read_arg(_args + 2, as, &fc->corrupt_bio_value, &ti->error);
			if (r)
				return r;
			argc--;

			/*
			 * Only corrupt bios with these flags set.
			 */
			r = dm_read_arg(_args + 3, as, &fc->corrupt_bio_flags, &ti->error);
			if (r)
				return r;
			argc--;

			continue;
		}

		ti->error = "Unrecognised flakey feature requested";
		return -EINVAL;
	}

	/* Corrupting writes cannot be combined with dropping/erroring them. */
	if (test_bit(DROP_WRITES, &fc->flags) && (fc->corrupt_bio_rw == WRITE)) {
		ti->error = "drop_writes is incompatible with corrupt_bio_byte with the WRITE flag set";
		return -EINVAL;

	} else if (test_bit(ERROR_WRITES, &fc->flags) && (fc->corrupt_bio_rw == WRITE)) {
		ti->error = "error_writes is incompatible with corrupt_bio_byte with the WRITE flag set";
		return -EINVAL;
	}

	return 0;
}
166
Josef Bacik3407ef52011-03-24 13:54:24 +0000167/*
Mike Snitzerdfd068b2011-08-02 12:32:05 +0100168 * Construct a flakey mapping:
169 * <dev_path> <offset> <up interval> <down interval> [<#feature args> [<arg>]*]
Mike Snitzerb26f5e32011-08-02 12:32:05 +0100170 *
171 * Feature args:
172 * [drop_writes]
Mike Snitzera3998792011-08-02 12:32:06 +0100173 * [corrupt_bio_byte <Nth_byte> <direction> <value> <bio_flags>]
174 *
175 * Nth_byte starts from 1 for the first byte.
176 * Direction is r for READ or w for WRITE.
177 * bio_flags is ignored if 0.
Josef Bacik3407ef52011-03-24 13:54:24 +0000178 */
179static int flakey_ctr(struct dm_target *ti, unsigned int argc, char **argv)
180{
Mike Snitzerdfd068b2011-08-02 12:32:05 +0100181 static struct dm_arg _args[] = {
182 {0, UINT_MAX, "Invalid up interval"},
183 {0, UINT_MAX, "Invalid down interval"},
184 };
Josef Bacik3407ef52011-03-24 13:54:24 +0000185
Mike Snitzerdfd068b2011-08-02 12:32:05 +0100186 int r;
187 struct flakey_c *fc;
188 unsigned long long tmpll;
189 struct dm_arg_set as;
190 const char *devname;
Mikulas Patocka31998ef2012-03-28 18:41:26 +0100191 char dummy;
Mike Snitzerdfd068b2011-08-02 12:32:05 +0100192
193 as.argc = argc;
194 as.argv = argv;
195
196 if (argc < 4) {
197 ti->error = "Invalid argument count";
Josef Bacik3407ef52011-03-24 13:54:24 +0000198 return -EINVAL;
199 }
200
Mike Snitzerb26f5e32011-08-02 12:32:05 +0100201 fc = kzalloc(sizeof(*fc), GFP_KERNEL);
Josef Bacik3407ef52011-03-24 13:54:24 +0000202 if (!fc) {
Alasdair G Kergon75e3a0f2013-07-10 23:41:17 +0100203 ti->error = "Cannot allocate context";
Josef Bacik3407ef52011-03-24 13:54:24 +0000204 return -ENOMEM;
205 }
206 fc->start_time = jiffies;
207
Mike Snitzerdfd068b2011-08-02 12:32:05 +0100208 devname = dm_shift_arg(&as);
Josef Bacik3407ef52011-03-24 13:54:24 +0000209
Vivek Goyale80d1c82015-07-31 09:20:36 -0400210 r = -EINVAL;
Mikulas Patocka31998ef2012-03-28 18:41:26 +0100211 if (sscanf(dm_shift_arg(&as), "%llu%c", &tmpll, &dummy) != 1) {
Mike Snitzerdfd068b2011-08-02 12:32:05 +0100212 ti->error = "Invalid device sector";
Josef Bacik3407ef52011-03-24 13:54:24 +0000213 goto bad;
214 }
Mike Snitzerdfd068b2011-08-02 12:32:05 +0100215 fc->start = tmpll;
Josef Bacik3407ef52011-03-24 13:54:24 +0000216
Mike Snitzerdfd068b2011-08-02 12:32:05 +0100217 r = dm_read_arg(_args, &as, &fc->up_interval, &ti->error);
218 if (r)
Josef Bacik3407ef52011-03-24 13:54:24 +0000219 goto bad;
Mike Snitzerdfd068b2011-08-02 12:32:05 +0100220
221 r = dm_read_arg(_args, &as, &fc->down_interval, &ti->error);
222 if (r)
223 goto bad;
Josef Bacik3407ef52011-03-24 13:54:24 +0000224
225 if (!(fc->up_interval + fc->down_interval)) {
Mike Snitzerdfd068b2011-08-02 12:32:05 +0100226 ti->error = "Total (up + down) interval is zero";
Wei Yongjunbff7e062016-08-08 14:09:27 +0000227 r = -EINVAL;
Josef Bacik3407ef52011-03-24 13:54:24 +0000228 goto bad;
229 }
230
231 if (fc->up_interval + fc->down_interval < fc->up_interval) {
Mike Snitzerdfd068b2011-08-02 12:32:05 +0100232 ti->error = "Interval overflow";
Wei Yongjunbff7e062016-08-08 14:09:27 +0000233 r = -EINVAL;
Josef Bacik3407ef52011-03-24 13:54:24 +0000234 goto bad;
235 }
236
Mike Snitzerb26f5e32011-08-02 12:32:05 +0100237 r = parse_features(&as, fc, ti);
Mike Snitzerdfd068b2011-08-02 12:32:05 +0100238 if (r)
239 goto bad;
240
Vivek Goyale80d1c82015-07-31 09:20:36 -0400241 r = dm_get_device(ti, devname, dm_table_get_mode(ti->table), &fc->dev);
242 if (r) {
Mike Snitzerdfd068b2011-08-02 12:32:05 +0100243 ti->error = "Device lookup failed";
Josef Bacik3407ef52011-03-24 13:54:24 +0000244 goto bad;
245 }
246
Alasdair G Kergon55a62ee2013-03-01 22:45:47 +0000247 ti->num_flush_bios = 1;
248 ti->num_discard_bios = 1;
Mike Snitzer30187e12016-01-31 13:28:26 -0500249 ti->per_io_data_size = sizeof(struct per_bio_data);
Josef Bacik3407ef52011-03-24 13:54:24 +0000250 ti->private = fc;
251 return 0;
252
253bad:
254 kfree(fc);
Vivek Goyale80d1c82015-07-31 09:20:36 -0400255 return r;
Josef Bacik3407ef52011-03-24 13:54:24 +0000256}
257
/*
 * Destroy a flakey mapping: drop the underlying device reference and
 * free the per-target context allocated in flakey_ctr().
 */
static void flakey_dtr(struct dm_target *ti)
{
	struct flakey_c *fc = ti->private;

	dm_put_device(ti, fc->dev);
	kfree(fc);
}
265
/* Translate a target-relative sector to a sector on the underlying device. */
static sector_t flakey_map_sector(struct dm_target *ti, sector_t bi_sector)
{
	struct flakey_c *fc = ti->private;

	return fc->start + dm_target_offset(ti, bi_sector);
}
272
/*
 * Redirect a bio to the underlying device, shifting its sector by the
 * configured start offset.  Zone-reset bios carry no data
 * (bio_sectors() == 0) but still address a sector, so they are remapped
 * explicitly.
 */
static void flakey_map_bio(struct dm_target *ti, struct bio *bio)
{
	struct flakey_c *fc = ti->private;

	bio->bi_bdev = fc->dev->bdev;
	if (bio_sectors(bio) || bio_op(bio) == REQ_OP_ZONE_RESET)
		bio->bi_iter.bi_sector =
			flakey_map_sector(ti, bio->bi_iter.bi_sector);
}
282
Mike Snitzera3998792011-08-02 12:32:06 +0100283static void corrupt_bio_data(struct bio *bio, struct flakey_c *fc)
284{
285 unsigned bio_bytes = bio_cur_bytes(bio);
286 char *data = bio_data(bio);
287
288 /*
289 * Overwrite the Nth byte of the data returned.
290 */
291 if (data && bio_bytes >= fc->corrupt_bio_byte) {
292 data[fc->corrupt_bio_byte - 1] = fc->corrupt_bio_value;
293
294 DMDEBUG("Corrupting data bio=%p by writing %u to byte %u "
Jens Axboe1eff9d32016-08-05 15:35:16 -0600295 "(rw=%c bi_opf=%u bi_sector=%llu cur_bytes=%u)\n",
Mike Snitzera3998792011-08-02 12:32:06 +0100296 bio, fc->corrupt_bio_value, fc->corrupt_bio_byte,
Jens Axboe1eff9d32016-08-05 15:35:16 -0600297 (bio_data_dir(bio) == WRITE) ? 'w' : 'r', bio->bi_opf,
Kent Overstreet4f024f32013-10-11 15:44:27 -0700298 (unsigned long long)bio->bi_iter.bi_sector, bio_bytes);
Mike Snitzera3998792011-08-02 12:32:06 +0100299 }
300}
301
/*
 * Map a bio.  While the device is in its "up" interval the bio passes
 * straight through.  While "down", the configured features decide:
 * drop or error writes, corrupt matching writes in place, defer read
 * handling to flakey_end_io(), or (by default) error everything.
 */
static int flakey_map(struct dm_target *ti, struct bio *bio)
{
	struct flakey_c *fc = ti->private;
	unsigned elapsed;
	struct per_bio_data *pb = dm_per_bio_data(bio, sizeof(struct per_bio_data));
	pb->bio_submitted = false;

	/* Do not fail reset zone */
	if (bio_op(bio) == REQ_OP_ZONE_RESET)
		goto map_bio;

	/* We need to remap reported zones, so remember the BIO iter */
	if (bio_op(bio) == REQ_OP_ZONE_REPORT)
		goto map_bio;

	/* Are we alive ? (position within the up+down cycle, in seconds) */
	elapsed = (jiffies - fc->start_time) / HZ;
	if (elapsed % (fc->up_interval + fc->down_interval) >= fc->up_interval) {
		/*
		 * Flag this bio as submitted while down.
		 */
		pb->bio_submitted = true;

		/*
		 * Error reads if neither corrupt_bio_byte or drop_writes or error_writes are set.
		 * Otherwise, flakey_end_io() will decide if the reads should be modified.
		 */
		if (bio_data_dir(bio) == READ) {
			if (!fc->corrupt_bio_byte && !test_bit(DROP_WRITES, &fc->flags) &&
			    !test_bit(ERROR_WRITES, &fc->flags))
				return DM_MAPIO_KILL;
			goto map_bio;
		}

		/*
		 * Drop or error writes?
		 */
		if (test_bit(DROP_WRITES, &fc->flags)) {
			/* Pretend success without issuing the write. */
			bio_endio(bio);
			return DM_MAPIO_SUBMITTED;
		}
		else if (test_bit(ERROR_WRITES, &fc->flags)) {
			bio_io_error(bio);
			return DM_MAPIO_SUBMITTED;
		}

		/*
		 * Corrupt matching writes.
		 */
		if (fc->corrupt_bio_byte && (fc->corrupt_bio_rw == WRITE)) {
			if (all_corrupt_bio_flags_match(bio, fc))
				corrupt_bio_data(bio, fc);
			goto map_bio;
		}

		/*
		 * By default, error all I/O.
		 */
		return DM_MAPIO_KILL;
	}

map_bio:
	flakey_map_bio(ti, bio);

	return DM_MAPIO_REMAPPED;
}
368
/*
 * Completion hook: handles reads that were allowed through while the
 * device was "down" (flagged via per_bio_data) — either corrupt the
 * returned data or convert the success into an error.  Zone bios are
 * passed through (reports are remapped back by fc->start).
 */
static int flakey_end_io(struct dm_target *ti, struct bio *bio,
		blk_status_t *error)
{
	struct flakey_c *fc = ti->private;
	struct per_bio_data *pb = dm_per_bio_data(bio, sizeof(struct per_bio_data));

	if (bio_op(bio) == REQ_OP_ZONE_RESET)
		return DM_ENDIO_DONE;

	if (bio_op(bio) == REQ_OP_ZONE_REPORT) {
		/* Undo the sector shift applied in flakey_map_bio(). */
		dm_remap_zone_report(ti, bio, fc->start);
		return DM_ENDIO_DONE;
	}

	if (!*error && pb->bio_submitted && (bio_data_dir(bio) == READ)) {
		if (fc->corrupt_bio_byte && (fc->corrupt_bio_rw == READ) &&
		    all_corrupt_bio_flags_match(bio, fc)) {
			/*
			 * Corrupt successful matching READs while in down state.
			 */
			corrupt_bio_data(bio, fc);

		} else if (!test_bit(DROP_WRITES, &fc->flags) &&
			   !test_bit(ERROR_WRITES, &fc->flags)) {
			/*
			 * Error read during the down_interval if drop_writes
			 * and error_writes were not configured.
			 */
			*error = BLK_STS_IOERR;
		}
	}

	return DM_ENDIO_DONE;
}
403
/*
 * Report target status.  INFO has nothing to report; TABLE re-emits the
 * constructor arguments so the table line can be reloaded verbatim.
 */
static void flakey_status(struct dm_target *ti, status_type_t type,
			  unsigned status_flags, char *result, unsigned maxlen)
{
	unsigned sz = 0;
	struct flakey_c *fc = ti->private;
	unsigned drop_writes, error_writes;

	switch (type) {
	case STATUSTYPE_INFO:
		result[0] = '\0';
		break;

	case STATUSTYPE_TABLE:
		DMEMIT("%s %llu %u %u ", fc->dev->name,
		       (unsigned long long)fc->start, fc->up_interval,
		       fc->down_interval);

		/* corrupt_bio_byte accounts for 5 feature args: keyword + 4 values. */
		drop_writes = test_bit(DROP_WRITES, &fc->flags);
		error_writes = test_bit(ERROR_WRITES, &fc->flags);
		DMEMIT("%u ", drop_writes + error_writes + (fc->corrupt_bio_byte > 0) * 5);

		if (drop_writes)
			DMEMIT("drop_writes ");
		else if (error_writes)
			DMEMIT("error_writes ");

		if (fc->corrupt_bio_byte)
			DMEMIT("corrupt_bio_byte %u %c %u %u ",
			       fc->corrupt_bio_byte,
			       (fc->corrupt_bio_rw == WRITE) ? 'w' : 'r',
			       fc->corrupt_bio_value, fc->corrupt_bio_flags);

		break;
	}
}
439
Christoph Hellwige56f81e2015-10-15 14:10:50 +0200440static int flakey_prepare_ioctl(struct dm_target *ti,
441 struct block_device **bdev, fmode_t *mode)
Josef Bacik3407ef52011-03-24 13:54:24 +0000442{
443 struct flakey_c *fc = ti->private;
Christoph Hellwige56f81e2015-10-15 14:10:50 +0200444
445 *bdev = fc->dev->bdev;
Josef Bacik3407ef52011-03-24 13:54:24 +0000446
Paolo Bonziniec8013b2012-01-12 16:01:29 +0100447 /*
448 * Only pass ioctls through if the device sizes match exactly.
449 */
450 if (fc->start ||
Christoph Hellwige56f81e2015-10-15 14:10:50 +0200451 ti->len != i_size_read((*bdev)->bd_inode) >> SECTOR_SHIFT)
452 return 1;
453 return 0;
Josef Bacik3407ef52011-03-24 13:54:24 +0000454}
455
/* Invoke fn on the single underlying device this target maps onto. */
static int flakey_iterate_devices(struct dm_target *ti, iterate_devices_callout_fn fn, void *data)
{
	struct flakey_c *fc = ti->private;

	return fn(ti, fc->dev, fc->start, ti->len, data);
}
462
/* device-mapper registration record for the "flakey" target. */
static struct target_type flakey_target = {
	.name   = "flakey",
	.version = {1, 5, 0},
	.features = DM_TARGET_ZONED_HM,	/* supports host-managed zoned devices */
	.module = THIS_MODULE,
	.ctr = flakey_ctr,
	.dtr = flakey_dtr,
	.map = flakey_map,
	.end_io = flakey_end_io,
	.status = flakey_status,
	.prepare_ioctl = flakey_prepare_ioctl,
	.iterate_devices = flakey_iterate_devices,
};
476
477static int __init dm_flakey_init(void)
478{
479 int r = dm_register_target(&flakey_target);
480
481 if (r < 0)
482 DMERR("register failed %d", r);
483
484 return r;
485}
486
/* Module unload: unregister the flakey target. */
static void __exit dm_flakey_exit(void)
{
	dm_unregister_target(&flakey_target);
}
491
492/* Module hooks */
493module_init(dm_flakey_init);
494module_exit(dm_flakey_exit);
495
496MODULE_DESCRIPTION(DM_NAME " flakey target");
497MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>");
498MODULE_LICENSE("GPL");