/*
 * Copyright (C) 2003 Sistina Software (UK) Limited.
 * Copyright (C) 2004, 2010-2011 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the GPL.
 */

#include <linux/device-mapper.h>

#include <linux/module.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/bio.h>
#include <linux/slab.h>

#define DM_MSG_PREFIX "flakey"

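/*
 * Helper used by the corrupt_bio_byte feature: a bio "matches" only when
 * every flag in fc->corrupt_bio_flags is also set in its bi_opf.  For
 * example (illustrative value, not taken from this file), a mask equal to
 * REQ_FUA would restrict corruption to FUA bios, while a mask of 0 matches
 * every bio.
 */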
#define all_corrupt_bio_flags_match(bio, fc)	\
	(((bio)->bi_opf & (fc)->corrupt_bio_flags) == (fc)->corrupt_bio_flags)

/*
 * Flakey: Used for testing only, simulates intermittent,
 * catastrophic device failure.
 */
struct flakey_c {
	struct dm_dev *dev;
	unsigned long start_time;
	sector_t start;
	unsigned up_interval;
	unsigned down_interval;
	unsigned long flags;
	unsigned corrupt_bio_byte;
	unsigned corrupt_bio_rw;
	unsigned corrupt_bio_value;
	unsigned corrupt_bio_flags;
};

enum feature_flag_bits {
	DROP_WRITES
};

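/*
 * Per-bio state, carved out of the space reserved via ti->per_io_data_size
 * and fetched with dm_per_bio_data(): records whether the bio was submitted
 * while the target was in its "down" interval, so flakey_end_io() knows
 * whether the completion should be corrupted or failed.
 */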
struct per_bio_data {
	bool bio_submitted;
};

static int parse_features(struct dm_arg_set *as, struct flakey_c *fc,
			  struct dm_target *ti)
{
	int r;
	unsigned argc;
	const char *arg_name;

	static struct dm_arg _args[] = {
		{0, 6, "Invalid number of feature args"},
		{1, UINT_MAX, "Invalid corrupt bio byte"},
		{0, 255, "Invalid corrupt value to write into bio byte (0-255)"},
		{0, UINT_MAX, "Invalid corrupt bio flags mask"},
	};

	/* No feature arguments supplied. */
	if (!as->argc)
		return 0;

	r = dm_read_arg_group(_args, as, &argc, &ti->error);
	if (r)
		return r;

	while (argc) {
		arg_name = dm_shift_arg(as);
		argc--;

		/*
		 * drop_writes
		 */
		if (!strcasecmp(arg_name, "drop_writes")) {
			if (test_and_set_bit(DROP_WRITES, &fc->flags)) {
				ti->error = "Feature drop_writes duplicated";
				return -EINVAL;
			}

			continue;
		}

		/*
		 * corrupt_bio_byte <Nth_byte> <direction> <value> <bio_flags>
		 */
		if (!strcasecmp(arg_name, "corrupt_bio_byte")) {
			if (!argc) {
				ti->error = "Feature corrupt_bio_byte requires parameters";
				return -EINVAL;
			}

			r = dm_read_arg(_args + 1, as, &fc->corrupt_bio_byte, &ti->error);
			if (r)
				return r;
			argc--;

			/*
			 * Direction r or w?
			 */
			arg_name = dm_shift_arg(as);
			if (!strcasecmp(arg_name, "w"))
				fc->corrupt_bio_rw = WRITE;
			else if (!strcasecmp(arg_name, "r"))
				fc->corrupt_bio_rw = READ;
			else {
				ti->error = "Invalid corrupt bio direction (r or w)";
				return -EINVAL;
			}
			argc--;

			/*
			 * Value of byte (0-255) to write in place of correct one.
			 */
			r = dm_read_arg(_args + 2, as, &fc->corrupt_bio_value, &ti->error);
			if (r)
				return r;
			argc--;

			/*
			 * Only corrupt bios with these flags set.
			 */
			r = dm_read_arg(_args + 3, as, &fc->corrupt_bio_flags, &ti->error);
			if (r)
				return r;
			argc--;

			continue;
		}

		ti->error = "Unrecognised flakey feature requested";
		return -EINVAL;
	}

	if (test_bit(DROP_WRITES, &fc->flags) && (fc->corrupt_bio_rw == WRITE)) {
		ti->error = "drop_writes is incompatible with corrupt_bio_byte with the WRITE flag set";
		return -EINVAL;
	}

	return 0;
}

/*
 * Construct a flakey mapping:
 * <dev_path> <offset> <up interval> <down interval> [<#feature args> [<arg>]*]
 *
 * Feature args:
 *   [drop_writes]
 *   [corrupt_bio_byte <Nth_byte> <direction> <value> <bio_flags>]
 *
 * Nth_byte starts from 1 for the first byte.
 * Direction is r for READ or w for WRITE.
 * bio_flags is ignored if 0.
 */
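/*
 * Illustrative table line (device name and numbers are made up, not taken
 * from this file):
 *
 *   dmsetup create flakey-test --table \
 *     "0 409600 flakey /dev/sdb 0 30 5 1 drop_writes"
 *
 * This maps 409600 sectors of /dev/sdb starting at offset 0, behaves
 * normally for 30 seconds, then silently drops all writes for 5 seconds,
 * and repeats that cycle.
 */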
static int flakey_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
	static struct dm_arg _args[] = {
		{0, UINT_MAX, "Invalid up interval"},
		{0, UINT_MAX, "Invalid down interval"},
	};

	int r;
	struct flakey_c *fc;
	unsigned long long tmpll;
	struct dm_arg_set as;
	const char *devname;
	char dummy;

	as.argc = argc;
	as.argv = argv;

	if (argc < 4) {
		ti->error = "Invalid argument count";
		return -EINVAL;
	}

	fc = kzalloc(sizeof(*fc), GFP_KERNEL);
	if (!fc) {
		ti->error = "Cannot allocate context";
		return -ENOMEM;
	}
	fc->start_time = jiffies;

	devname = dm_shift_arg(&as);

	r = -EINVAL;
	if (sscanf(dm_shift_arg(&as), "%llu%c", &tmpll, &dummy) != 1) {
		ti->error = "Invalid device sector";
		goto bad;
	}
	fc->start = tmpll;

	r = dm_read_arg(_args, &as, &fc->up_interval, &ti->error);
	if (r)
		goto bad;

	r = dm_read_arg(_args, &as, &fc->down_interval, &ti->error);
	if (r)
		goto bad;

	if (!(fc->up_interval + fc->down_interval)) {
		ti->error = "Total (up + down) interval is zero";
		r = -EINVAL;
		goto bad;
	}

	if (fc->up_interval + fc->down_interval < fc->up_interval) {
		ti->error = "Interval overflow";
		r = -EINVAL;
		goto bad;
	}

	r = parse_features(&as, fc, ti);
	if (r)
		goto bad;

	r = dm_get_device(ti, devname, dm_table_get_mode(ti->table), &fc->dev);
	if (r) {
		ti->error = "Device lookup failed";
		goto bad;
	}

	ti->num_flush_bios = 1;
	ti->num_discard_bios = 1;
	ti->per_io_data_size = sizeof(struct per_bio_data);
	ti->private = fc;
	return 0;

bad:
	kfree(fc);
	return r;
}

static void flakey_dtr(struct dm_target *ti)
{
	struct flakey_c *fc = ti->private;

	dm_put_device(ti, fc->dev);
	kfree(fc);
}

static sector_t flakey_map_sector(struct dm_target *ti, sector_t bi_sector)
{
	struct flakey_c *fc = ti->private;

	return fc->start + dm_target_offset(ti, bi_sector);
}

static void flakey_map_bio(struct dm_target *ti, struct bio *bio)
{
	struct flakey_c *fc = ti->private;

	bio->bi_bdev = fc->dev->bdev;
	if (bio_sectors(bio))
		bio->bi_iter.bi_sector =
			flakey_map_sector(ti, bio->bi_iter.bi_sector);
}

static void corrupt_bio_data(struct bio *bio, struct flakey_c *fc)
{
	unsigned int corrupt_bio_byte = fc->corrupt_bio_byte - 1;

	struct bvec_iter iter;
	struct bio_vec bvec;

	if (!bio_has_data(bio))
		return;

	/*
	 * Overwrite the Nth byte of the bio's data, on whichever page
	 * it falls.
	 */
	bio_for_each_segment(bvec, bio, iter) {
		if (bio_iter_len(bio, iter) > corrupt_bio_byte) {
			char *segment = (page_address(bio_iter_page(bio, iter))
					 + bio_iter_offset(bio, iter));
			segment[corrupt_bio_byte] = fc->corrupt_bio_value;
			DMDEBUG("Corrupting data bio=%p by writing %u to byte %u "
				"(rw=%c bi_opf=%u bi_sector=%llu size=%u)\n",
				bio, fc->corrupt_bio_value, fc->corrupt_bio_byte,
				(bio_data_dir(bio) == WRITE) ? 'w' : 'r', bio->bi_opf,
				(unsigned long long)bio->bi_iter.bi_sector,
				bio->bi_iter.bi_size);
			break;
		}
		corrupt_bio_byte -= bio_iter_len(bio, iter);
	}
}

static int flakey_map(struct dm_target *ti, struct bio *bio)
{
	struct flakey_c *fc = ti->private;
	unsigned elapsed;
	struct per_bio_data *pb = dm_per_bio_data(bio, sizeof(struct per_bio_data));
	pb->bio_submitted = false;

	/* Are we alive? */
	elapsed = (jiffies - fc->start_time) / HZ;
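	/*
	 * Illustrative timing (numbers are hypothetical): with up_interval=30
	 * and down_interval=10, each 40 second cycle spends seconds 0-29 "up"
	 * (bios remapped to the underlying device untouched) and seconds 30-39
	 * "down" (bios errored, dropped or corrupted according to the
	 * configured feature args).
	 */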
	if (elapsed % (fc->up_interval + fc->down_interval) >= fc->up_interval) {
		/*
		 * Flag this bio as submitted while down.
		 */
		pb->bio_submitted = true;

		/*
		 * Error reads if neither corrupt_bio_byte nor drop_writes is set.
		 * Otherwise, flakey_end_io() will decide if the reads should be modified.
		 */
		if (bio_data_dir(bio) == READ) {
			if (!fc->corrupt_bio_byte && !test_bit(DROP_WRITES, &fc->flags))
				return -EIO;
			goto map_bio;
		}

		/*
		 * Drop writes?
		 */
		if (test_bit(DROP_WRITES, &fc->flags)) {
			bio_endio(bio);
			return DM_MAPIO_SUBMITTED;
		}

		/*
		 * Corrupt matching writes.
		 */
		if (fc->corrupt_bio_byte && (fc->corrupt_bio_rw == WRITE)) {
			if (all_corrupt_bio_flags_match(bio, fc))
				corrupt_bio_data(bio, fc);
			goto map_bio;
		}

		/*
		 * By default, error all I/O.
		 */
		return -EIO;
	}

map_bio:
	flakey_map_bio(ti, bio);

	return DM_MAPIO_REMAPPED;
}

static int flakey_end_io(struct dm_target *ti, struct bio *bio, int error)
{
	struct flakey_c *fc = ti->private;
	struct per_bio_data *pb = dm_per_bio_data(bio, sizeof(struct per_bio_data));

	if (!error && pb->bio_submitted && (bio_data_dir(bio) == READ)) {
		if (fc->corrupt_bio_byte && (fc->corrupt_bio_rw == READ) &&
		    all_corrupt_bio_flags_match(bio, fc)) {
			/*
			 * Corrupt successful matching READs while in down state.
			 */
			corrupt_bio_data(bio, fc);

		} else if (!test_bit(DROP_WRITES, &fc->flags)) {
			/*
			 * Error the read during the down_interval if drop_writes
			 * wasn't configured.
			 */
			return -EIO;
		}
	}

	return error;
}

static void flakey_status(struct dm_target *ti, status_type_t type,
			  unsigned status_flags, char *result, unsigned maxlen)
{
	unsigned sz = 0;
	struct flakey_c *fc = ti->private;
	unsigned drop_writes;

	switch (type) {
	case STATUSTYPE_INFO:
		result[0] = '\0';
		break;

	case STATUSTYPE_TABLE:
		DMEMIT("%s %llu %u %u ", fc->dev->name,
		       (unsigned long long)fc->start, fc->up_interval,
		       fc->down_interval);

		drop_writes = test_bit(DROP_WRITES, &fc->flags);
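		/*
		 * Feature arg count: drop_writes contributes one word,
		 * corrupt_bio_byte contributes five (the keyword plus its
		 * four parameters).
		 */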
		DMEMIT("%u ", drop_writes + (fc->corrupt_bio_byte > 0) * 5);

		if (drop_writes)
			DMEMIT("drop_writes ");

		if (fc->corrupt_bio_byte)
			DMEMIT("corrupt_bio_byte %u %c %u %u ",
			       fc->corrupt_bio_byte,
			       (fc->corrupt_bio_rw == WRITE) ? 'w' : 'r',
			       fc->corrupt_bio_value, fc->corrupt_bio_flags);

		break;
	}
}

static int flakey_prepare_ioctl(struct dm_target *ti,
				struct block_device **bdev, fmode_t *mode)
{
	struct flakey_c *fc = ti->private;

	*bdev = fc->dev->bdev;

	/*
	 * Only pass ioctls through if the device sizes match exactly.
	 */
	if (fc->start ||
	    ti->len != i_size_read((*bdev)->bd_inode) >> SECTOR_SHIFT)
		return 1;
	return 0;
}

static int flakey_iterate_devices(struct dm_target *ti, iterate_devices_callout_fn fn, void *data)
{
	struct flakey_c *fc = ti->private;

	return fn(ti, fc->dev, fc->start, ti->len, data);
}

static struct target_type flakey_target = {
	.name   = "flakey",
	.version = {1, 3, 1},
	.module = THIS_MODULE,
	.ctr    = flakey_ctr,
	.dtr    = flakey_dtr,
	.map    = flakey_map,
	.end_io = flakey_end_io,
	.status = flakey_status,
	.prepare_ioctl = flakey_prepare_ioctl,
	.iterate_devices = flakey_iterate_devices,
};

static int __init dm_flakey_init(void)
{
	int r = dm_register_target(&flakey_target);

	if (r < 0)
		DMERR("register failed %d", r);

	return r;
}

static void __exit dm_flakey_exit(void)
{
	dm_unregister_target(&flakey_target);
}

/* Module hooks */
module_init(dm_flakey_init);
module_exit(dm_flakey_exit);

MODULE_DESCRIPTION(DM_NAME " flakey target");
MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>");
MODULE_LICENSE("GPL");