/*
 * Copyright (C) 2003 Sistina Software (UK) Limited.
 * Copyright (C) 2004, 2010-2011 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the GPL.
 */

#include <linux/device-mapper.h>

#include <linux/module.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/bio.h>
#include <linux/slab.h>

#define DM_MSG_PREFIX "flakey"

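/*
 * A bio "matches" only if every flag bit requested in corrupt_bio_flags is
 * also set in the bio's bi_opf, i.e. the mask is a required set of flags,
 * not an "any of these" filter.
 */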
#define all_corrupt_bio_flags_match(bio, fc) \
        (((bio)->bi_opf & (fc)->corrupt_bio_flags) == (fc)->corrupt_bio_flags)

/*
 * Flakey: Used for testing only, simulates intermittent,
 * catastrophic device failure.
 */
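/*
 * Timing model (see flakey_map() below): the time since the table was loaded
 * is divided into repeating cycles of up_interval seconds, during which I/O
 * is passed through unchanged, followed by down_interval seconds, during
 * which I/O is errored, dropped or corrupted according to the feature args.
 */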
struct flakey_c {
        struct dm_dev *dev;
        unsigned long start_time;
        sector_t start;
        unsigned up_interval;
        unsigned down_interval;
        unsigned long flags;
        unsigned corrupt_bio_byte;
        unsigned corrupt_bio_rw;
        unsigned corrupt_bio_value;
        unsigned corrupt_bio_flags;
};

enum feature_flag_bits {
        DROP_WRITES,
        ERROR_WRITES
};

struct per_bio_data {
        bool bio_submitted;
};

static int parse_features(struct dm_arg_set *as, struct flakey_c *fc,
                          struct dm_target *ti)
{
        int r;
        unsigned argc;
        const char *arg_name;

        static struct dm_arg _args[] = {
                {0, 6, "Invalid number of feature args"},
                {1, UINT_MAX, "Invalid corrupt bio byte"},
                {0, 255, "Invalid corrupt value to write into bio byte (0-255)"},
                {0, UINT_MAX, "Invalid corrupt bio flags mask"},
        };

        /* No feature arguments supplied. */
        if (!as->argc)
                return 0;

        r = dm_read_arg_group(_args, as, &argc, &ti->error);
        if (r)
                return r;

        while (argc) {
                arg_name = dm_shift_arg(as);
                argc--;

                /*
                 * drop_writes
                 */
                if (!strcasecmp(arg_name, "drop_writes")) {
                        if (test_and_set_bit(DROP_WRITES, &fc->flags)) {
                                ti->error = "Feature drop_writes duplicated";
                                return -EINVAL;
                        } else if (test_bit(ERROR_WRITES, &fc->flags)) {
                                ti->error = "Feature drop_writes conflicts with feature error_writes";
                                return -EINVAL;
                        }

                        continue;
                }

                /*
                 * error_writes
                 */
                if (!strcasecmp(arg_name, "error_writes")) {
                        if (test_and_set_bit(ERROR_WRITES, &fc->flags)) {
                                ti->error = "Feature error_writes duplicated";
                                return -EINVAL;

                        } else if (test_bit(DROP_WRITES, &fc->flags)) {
                                ti->error = "Feature error_writes conflicts with feature drop_writes";
                                return -EINVAL;
                        }

                        continue;
                }

                /*
                 * corrupt_bio_byte <Nth_byte> <direction> <value> <bio_flags>
                 */
                if (!strcasecmp(arg_name, "corrupt_bio_byte")) {
                        if (!argc) {
                                ti->error = "Feature corrupt_bio_byte requires parameters";
                                return -EINVAL;
                        }

                        r = dm_read_arg(_args + 1, as, &fc->corrupt_bio_byte, &ti->error);
                        if (r)
                                return r;
                        argc--;

                        /*
                         * Direction r or w?
                         */
                        arg_name = dm_shift_arg(as);
                        if (!strcasecmp(arg_name, "w"))
                                fc->corrupt_bio_rw = WRITE;
                        else if (!strcasecmp(arg_name, "r"))
                                fc->corrupt_bio_rw = READ;
                        else {
                                ti->error = "Invalid corrupt bio direction (r or w)";
                                return -EINVAL;
                        }
                        argc--;

                        /*
                         * Value of byte (0-255) to write in place of correct one.
                         */
                        r = dm_read_arg(_args + 2, as, &fc->corrupt_bio_value, &ti->error);
                        if (r)
                                return r;
                        argc--;

                        /*
                         * Only corrupt bios with these flags set.
                         */
                        r = dm_read_arg(_args + 3, as, &fc->corrupt_bio_flags, &ti->error);
                        if (r)
                                return r;
                        argc--;

                        continue;
                }

                ti->error = "Unrecognised flakey feature requested";
                return -EINVAL;
        }

        if (test_bit(DROP_WRITES, &fc->flags) && (fc->corrupt_bio_rw == WRITE)) {
                ti->error = "drop_writes is incompatible with corrupt_bio_byte with the WRITE flag set";
                return -EINVAL;

        } else if (test_bit(ERROR_WRITES, &fc->flags) && (fc->corrupt_bio_rw == WRITE)) {
                ti->error = "error_writes is incompatible with corrupt_bio_byte with the WRITE flag set";
                return -EINVAL;
        }

        return 0;
}

/*
 * Construct a flakey mapping:
 * <dev_path> <offset> <up interval> <down interval> [<#feature args> [<arg>]*]
 *
 * Feature args:
 *   [drop_writes]
 *   [error_writes]
 *   [corrupt_bio_byte <Nth_byte> <direction> <value> <bio_flags>]
 *
 * Nth_byte starts from 1 for the first byte.
 * Direction is r for READ or w for WRITE.
 * bio_flags is ignored if 0.
 */
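/*
 * Illustrative example (the device name and sizes below are made up, not
 * taken from this file): a 1 GiB mapping onto /dev/sdc1 that behaves
 * normally for 59 seconds and then drops all writes for 1 second could be
 * loaded with a table line such as:
 *
 *   0 2097152 flakey /dev/sdc1 0 59 1 1 drop_writes
 *
 * and a read-corrupting variant that, while down, overwrites byte 32 of
 * each matching read with the value 1 (bio_flags 0, i.e. ignored):
 *
 *   0 2097152 flakey /dev/sdc1 0 59 1 5 corrupt_bio_byte 32 r 1 0
 */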
static int flakey_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
        static struct dm_arg _args[] = {
                {0, UINT_MAX, "Invalid up interval"},
                {0, UINT_MAX, "Invalid down interval"},
        };

        int r;
        struct flakey_c *fc;
        unsigned long long tmpll;
        struct dm_arg_set as;
        const char *devname;
        char dummy;

        as.argc = argc;
        as.argv = argv;

        if (argc < 4) {
                ti->error = "Invalid argument count";
                return -EINVAL;
        }

        fc = kzalloc(sizeof(*fc), GFP_KERNEL);
        if (!fc) {
                ti->error = "Cannot allocate context";
                return -ENOMEM;
        }
        fc->start_time = jiffies;

        devname = dm_shift_arg(&as);

        r = -EINVAL;
        if (sscanf(dm_shift_arg(&as), "%llu%c", &tmpll, &dummy) != 1) {
                ti->error = "Invalid device sector";
                goto bad;
        }
        fc->start = tmpll;

        r = dm_read_arg(_args, &as, &fc->up_interval, &ti->error);
        if (r)
                goto bad;

        r = dm_read_arg(_args, &as, &fc->down_interval, &ti->error);
        if (r)
                goto bad;

        if (!(fc->up_interval + fc->down_interval)) {
                ti->error = "Total (up + down) interval is zero";
                r = -EINVAL;
                goto bad;
        }

        if (fc->up_interval + fc->down_interval < fc->up_interval) {
                ti->error = "Interval overflow";
                r = -EINVAL;
                goto bad;
        }

        r = parse_features(&as, fc, ti);
        if (r)
                goto bad;

        r = dm_get_device(ti, devname, dm_table_get_mode(ti->table), &fc->dev);
        if (r) {
                ti->error = "Device lookup failed";
                goto bad;
        }

        ti->num_flush_bios = 1;
        ti->num_discard_bios = 1;
        ti->per_io_data_size = sizeof(struct per_bio_data);
        ti->private = fc;
        return 0;

bad:
        kfree(fc);
        return r;
}

static void flakey_dtr(struct dm_target *ti)
{
        struct flakey_c *fc = ti->private;

        dm_put_device(ti, fc->dev);
        kfree(fc);
}

static sector_t flakey_map_sector(struct dm_target *ti, sector_t bi_sector)
{
        struct flakey_c *fc = ti->private;

        return fc->start + dm_target_offset(ti, bi_sector);
}

static void flakey_map_bio(struct dm_target *ti, struct bio *bio)
{
        struct flakey_c *fc = ti->private;

        bio->bi_bdev = fc->dev->bdev;
        if (bio_sectors(bio))
                bio->bi_iter.bi_sector =
                        flakey_map_sector(ti, bio->bi_iter.bi_sector);
}

static void corrupt_bio_data(struct bio *bio, struct flakey_c *fc)
{
        unsigned bio_bytes = bio_cur_bytes(bio);
        char *data = bio_data(bio);

        /*
         * Overwrite the Nth byte of the data returned.
         */
        if (data && bio_bytes >= fc->corrupt_bio_byte) {
                data[fc->corrupt_bio_byte - 1] = fc->corrupt_bio_value;

                DMDEBUG("Corrupting data bio=%p by writing %u to byte %u "
                        "(rw=%c bi_opf=%u bi_sector=%llu cur_bytes=%u)\n",
                        bio, fc->corrupt_bio_value, fc->corrupt_bio_byte,
                        (bio_data_dir(bio) == WRITE) ? 'w' : 'r', bio->bi_opf,
                        (unsigned long long)bio->bi_iter.bi_sector, bio_bytes);
        }
}

static int flakey_map(struct dm_target *ti, struct bio *bio)
{
        struct flakey_c *fc = ti->private;
        unsigned elapsed;
        struct per_bio_data *pb = dm_per_bio_data(bio, sizeof(struct per_bio_data));
        pb->bio_submitted = false;

        /* Are we alive ? */
        elapsed = (jiffies - fc->start_time) / HZ;
        if (elapsed % (fc->up_interval + fc->down_interval) >= fc->up_interval) {
                /*
                 * Flag this bio as submitted while down.
                 */
                pb->bio_submitted = true;

                /*
                 * Error reads if none of corrupt_bio_byte, drop_writes or
                 * error_writes is set.  Otherwise, flakey_end_io() will
                 * decide if the reads should be modified.
                 */
                if (bio_data_dir(bio) == READ) {
                        if (!fc->corrupt_bio_byte && !test_bit(DROP_WRITES, &fc->flags) &&
                            !test_bit(ERROR_WRITES, &fc->flags))
                                return DM_MAPIO_KILL;
                        goto map_bio;
                }

                /*
                 * Drop or error writes?
                 */
                if (test_bit(DROP_WRITES, &fc->flags)) {
                        bio_endio(bio);
                        return DM_MAPIO_SUBMITTED;
                } else if (test_bit(ERROR_WRITES, &fc->flags)) {
                        bio_io_error(bio);
                        return DM_MAPIO_SUBMITTED;
                }

                /*
                 * Corrupt matching writes.
                 */
                if (fc->corrupt_bio_byte && (fc->corrupt_bio_rw == WRITE)) {
                        if (all_corrupt_bio_flags_match(bio, fc))
                                corrupt_bio_data(bio, fc);
                        goto map_bio;
                }

                /*
                 * By default, error all I/O.
                 */
                return DM_MAPIO_KILL;
        }

map_bio:
        flakey_map_bio(ti, bio);

        return DM_MAPIO_REMAPPED;
}

static int flakey_end_io(struct dm_target *ti, struct bio *bio,
                         blk_status_t *error)
{
        struct flakey_c *fc = ti->private;
        struct per_bio_data *pb = dm_per_bio_data(bio, sizeof(struct per_bio_data));

        if (!*error && pb->bio_submitted && (bio_data_dir(bio) == READ)) {
                if (fc->corrupt_bio_byte && (fc->corrupt_bio_rw == READ) &&
                    all_corrupt_bio_flags_match(bio, fc)) {
                        /*
                         * Corrupt successful matching READs while in down state.
                         */
                        corrupt_bio_data(bio, fc);

                } else if (!test_bit(DROP_WRITES, &fc->flags) &&
                           !test_bit(ERROR_WRITES, &fc->flags)) {
                        /*
                         * Error read during the down_interval if drop_writes
                         * and error_writes were not configured.
                         */
                        *error = BLK_STS_IOERR;
                }
        }

        return DM_ENDIO_DONE;
}

static void flakey_status(struct dm_target *ti, status_type_t type,
                          unsigned status_flags, char *result, unsigned maxlen)
{
        unsigned sz = 0;
        struct flakey_c *fc = ti->private;
        unsigned drop_writes, error_writes;

        switch (type) {
        case STATUSTYPE_INFO:
                result[0] = '\0';
                break;

        case STATUSTYPE_TABLE:
                DMEMIT("%s %llu %u %u ", fc->dev->name,
                       (unsigned long long)fc->start, fc->up_interval,
                       fc->down_interval);

                drop_writes = test_bit(DROP_WRITES, &fc->flags);
                error_writes = test_bit(ERROR_WRITES, &fc->flags);
                DMEMIT("%u ", drop_writes + error_writes + (fc->corrupt_bio_byte > 0) * 5);

                if (drop_writes)
                        DMEMIT("drop_writes ");
                else if (error_writes)
                        DMEMIT("error_writes ");

                if (fc->corrupt_bio_byte)
                        DMEMIT("corrupt_bio_byte %u %c %u %u ",
                               fc->corrupt_bio_byte,
                               (fc->corrupt_bio_rw == WRITE) ? 'w' : 'r',
                               fc->corrupt_bio_value, fc->corrupt_bio_flags);

                break;
        }
}

static int flakey_prepare_ioctl(struct dm_target *ti,
                                struct block_device **bdev, fmode_t *mode)
{
        struct flakey_c *fc = ti->private;

        *bdev = fc->dev->bdev;

        /*
         * Only pass ioctls through if the device sizes match exactly.
         */
        if (fc->start ||
            ti->len != i_size_read((*bdev)->bd_inode) >> SECTOR_SHIFT)
                return 1;
        return 0;
}

static int flakey_iterate_devices(struct dm_target *ti, iterate_devices_callout_fn fn, void *data)
{
        struct flakey_c *fc = ti->private;

        return fn(ti, fc->dev, fc->start, ti->len, data);
}

static struct target_type flakey_target = {
        .name   = "flakey",
        .version = {1, 4, 0},
        .module = THIS_MODULE,
        .ctr    = flakey_ctr,
        .dtr    = flakey_dtr,
        .map    = flakey_map,
        .end_io = flakey_end_io,
        .status = flakey_status,
        .prepare_ioctl = flakey_prepare_ioctl,
        .iterate_devices = flakey_iterate_devices,
};

static int __init dm_flakey_init(void)
{
        int r = dm_register_target(&flakey_target);

        if (r < 0)
                DMERR("register failed %d", r);

        return r;
}

static void __exit dm_flakey_exit(void)
{
        dm_unregister_target(&flakey_target);
}

/* Module hooks */
module_init(dm_flakey_init);
module_exit(dm_flakey_exit);

MODULE_DESCRIPTION(DM_NAME " flakey target");
MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>");
MODULE_LICENSE("GPL");