/*
 * MTD device concatenation layer
 *
 * (C) 2002 Robert Kaiser <rkaiser@sysgo.de>
 *
 * NAND support by Christian Gan <cgan@iders.ca>
 *
 * This code is GPL
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/types.h>

#include <linux/mtd/mtd.h>
#include <linux/mtd/concat.h>

#include <asm/div64.h>

/*
 * Our storage structure:
 * subdev points to an array of pointers to struct mtd_info objects
 * which is allocated along with this structure.
 */
struct mtd_concat {
	struct mtd_info mtd;
	int num_subdev;
	struct mtd_info **subdev;
};

/*
 * How to calculate the size required for the above structure,
 * including the pointer array subdev points to:
 */
#define SIZEOF_STRUCT_MTD_CONCAT(num_subdev)	\
	((sizeof(struct mtd_concat) + (num_subdev) * sizeof(struct mtd_info *)))

/*
 * Given a pointer to the MTD object in the mtd_concat structure,
 * we can retrieve the pointer to that structure with this macro.
 */
#define CONCAT(x) ((struct mtd_concat *)(x))
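
/*
 * A minimal equivalent sketch using the kernel's container_of() helper;
 * the plain cast above is valid only because 'mtd' is the first member
 * of struct mtd_concat:
 *
 *	#define CONCAT(x) container_of(x, struct mtd_concat, mtd)
 */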

/*
 * MTD methods which look up the relevant subdevice, translate the
 * effective address and pass through to the subdevice.
 */
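
/*
 * Worked example (hypothetical layout): with two 1 MiB subdevices, a
 * 512-byte read at offset 0x180000 skips subdev[0] ('from' drops to
 * 0x80000) and is then served entirely by subdev[1] at that offset.
 */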

static int
concat_read(struct mtd_info *mtd, loff_t from, size_t len,
	    size_t * retlen, u_char * buf)
{
	struct mtd_concat *concat = CONCAT(mtd);
	int ret = 0, err;
	int i;

	*retlen = 0;

	for (i = 0; i < concat->num_subdev; i++) {
		struct mtd_info *subdev = concat->subdev[i];
		size_t size, retsize;

		if (from >= subdev->size) {
			/* Not destined for this subdev */
			size = 0;
			from -= subdev->size;
			continue;
		}
		if (from + len > subdev->size)
			/* First part goes into this subdev */
			size = subdev->size - from;
		else
			/* Entire transaction goes into this subdev */
			size = len;

		err = subdev->read(subdev, from, size, &retsize, buf);

		/* Save information about bitflips! */
		if (unlikely(err)) {
			if (err == -EBADMSG) {
				mtd->ecc_stats.failed++;
				ret = err;
			} else if (err == -EUCLEAN) {
				mtd->ecc_stats.corrected++;
				/* Do not overwrite -EBADMSG !! */
				if (!ret)
					ret = err;
			} else
				return err;
		}

		*retlen += retsize;
		len -= size;
		if (len == 0)
			return ret;

		buf += size;
		from = 0;
	}
	return -EINVAL;
}

static int
concat_write(struct mtd_info *mtd, loff_t to, size_t len,
	     size_t * retlen, const u_char * buf)
{
	struct mtd_concat *concat = CONCAT(mtd);
	int err = -EINVAL;
	int i;

	if (!(mtd->flags & MTD_WRITEABLE))
		return -EROFS;

	*retlen = 0;

	for (i = 0; i < concat->num_subdev; i++) {
		struct mtd_info *subdev = concat->subdev[i];
		size_t size, retsize;

		if (to >= subdev->size) {
			size = 0;
			to -= subdev->size;
			continue;
		}
		if (to + len > subdev->size)
			size = subdev->size - to;
		else
			size = len;

		if (!(subdev->flags & MTD_WRITEABLE))
			err = -EROFS;
		else
			err = subdev->write(subdev, to, size, &retsize, buf);

		if (err)
			break;

		*retlen += retsize;
		len -= size;
		if (len == 0)
			break;

		err = -EINVAL;
		buf += size;
		to = 0;
	}
	return err;
}

static int
concat_writev(struct mtd_info *mtd, const struct kvec *vecs,
	      unsigned long count, loff_t to, size_t * retlen)
{
	struct mtd_concat *concat = CONCAT(mtd);
	struct kvec *vecs_copy;
	unsigned long entry_low, entry_high;
	size_t total_len = 0;
	int i;
	int err = -EINVAL;

	if (!(mtd->flags & MTD_WRITEABLE))
		return -EROFS;

	*retlen = 0;

	/* Calculate total length of data */
	for (i = 0; i < count; i++)
		total_len += vecs[i].iov_len;

	/* Do not allow write past end of device */
	if ((to + total_len) > mtd->size)
		return -EINVAL;

	/* Check alignment */
	if (mtd->writesize > 1) {
		uint64_t __to = to;
		if (do_div(__to, mtd->writesize) || (total_len % mtd->writesize))
			return -EINVAL;
	}

	/* make a copy of vecs */
	vecs_copy = kmalloc(sizeof(struct kvec) * count, GFP_KERNEL);
	if (!vecs_copy)
		return -ENOMEM;
	memcpy(vecs_copy, vecs, sizeof(struct kvec) * count);

	entry_low = 0;
	for (i = 0; i < concat->num_subdev; i++) {
		struct mtd_info *subdev = concat->subdev[i];
		size_t size, wsize, retsize, old_iov_len;

		if (to >= subdev->size) {
			to -= subdev->size;
			continue;
		}

		size = min(total_len, (size_t)(subdev->size - to));
		wsize = size;	/* store for future use */

		entry_high = entry_low;
		while (entry_high < count) {
			if (size <= vecs_copy[entry_high].iov_len)
				break;
			size -= vecs_copy[entry_high++].iov_len;
		}

		old_iov_len = vecs_copy[entry_high].iov_len;
		vecs_copy[entry_high].iov_len = size;

		if (!(subdev->flags & MTD_WRITEABLE))
			err = -EROFS;
		else
			err = subdev->writev(subdev, &vecs_copy[entry_low],
				entry_high - entry_low + 1, to, &retsize);

		vecs_copy[entry_high].iov_len = old_iov_len - size;
		vecs_copy[entry_high].iov_base += size;

		entry_low = entry_high;

		if (err)
			break;

		*retlen += retsize;
		total_len -= wsize;

		if (total_len == 0)
			break;

		err = -EINVAL;
		to = 0;
	}

	kfree(vecs_copy);
	return err;
}

static int
concat_read_oob(struct mtd_info *mtd, loff_t from, struct mtd_oob_ops *ops)
{
	struct mtd_concat *concat = CONCAT(mtd);
	struct mtd_oob_ops devops = *ops;
	int i, err, ret = 0;

	ops->retlen = ops->oobretlen = 0;

	for (i = 0; i < concat->num_subdev; i++) {
		struct mtd_info *subdev = concat->subdev[i];

		if (from >= subdev->size) {
			from -= subdev->size;
			continue;
		}

		/* partial read ? */
		if (from + devops.len > subdev->size)
			devops.len = subdev->size - from;

		err = subdev->read_oob(subdev, from, &devops);
		ops->retlen += devops.retlen;
		ops->oobretlen += devops.oobretlen;

		/* Save information about bitflips! */
		if (unlikely(err)) {
			if (err == -EBADMSG) {
				mtd->ecc_stats.failed++;
				ret = err;
			} else if (err == -EUCLEAN) {
				mtd->ecc_stats.corrected++;
				/* Do not overwrite -EBADMSG !! */
				if (!ret)
					ret = err;
			} else
				return err;
		}

		if (devops.datbuf) {
			devops.len = ops->len - ops->retlen;
			if (!devops.len)
				return ret;
			devops.datbuf += devops.retlen;
		}
		if (devops.oobbuf) {
			devops.ooblen = ops->ooblen - ops->oobretlen;
			if (!devops.ooblen)
				return ret;
			devops.oobbuf += ops->oobretlen;
		}

		from = 0;
	}
	return -EINVAL;
}

static int
concat_write_oob(struct mtd_info *mtd, loff_t to, struct mtd_oob_ops *ops)
{
	struct mtd_concat *concat = CONCAT(mtd);
	struct mtd_oob_ops devops = *ops;
	int i, err;

	if (!(mtd->flags & MTD_WRITEABLE))
		return -EROFS;

	ops->retlen = 0;

	for (i = 0; i < concat->num_subdev; i++) {
		struct mtd_info *subdev = concat->subdev[i];

		if (to >= subdev->size) {
			to -= subdev->size;
			continue;
		}

		/* partial write ? */
		if (to + devops.len > subdev->size)
			devops.len = subdev->size - to;

		err = subdev->write_oob(subdev, to, &devops);
		ops->retlen += devops.retlen;
		if (err)
			return err;

		if (devops.datbuf) {
			devops.len = ops->len - ops->retlen;
			if (!devops.len)
				return 0;
			devops.datbuf += devops.retlen;
		}
		if (devops.oobbuf) {
			devops.ooblen = ops->ooblen - ops->oobretlen;
			if (!devops.ooblen)
				return 0;
			devops.oobbuf += devops.oobretlen;
		}
		to = 0;
	}
	return -EINVAL;
}

static void concat_erase_callback(struct erase_info *instr)
{
	wake_up((wait_queue_head_t *) instr->priv);
}

static int concat_dev_erase(struct mtd_info *mtd, struct erase_info *erase)
{
	int err;
	wait_queue_head_t waitq;
	DECLARE_WAITQUEUE(wait, current);

	/*
	 * This code was stol^H^H^H^Hinspired by mtdchar.c
	 */
	init_waitqueue_head(&waitq);

	erase->mtd = mtd;
	erase->callback = concat_erase_callback;
	erase->priv = (unsigned long) &waitq;

	/*
	 * FIXME: Allow INTERRUPTIBLE. Which means
	 * not having the wait_queue head on the stack.
	 */
	err = mtd->erase(mtd, erase);
	if (!err) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		add_wait_queue(&waitq, &wait);
		if (erase->state != MTD_ERASE_DONE
		    && erase->state != MTD_ERASE_FAILED)
			schedule();
		remove_wait_queue(&waitq, &wait);
		set_current_state(TASK_RUNNING);

		err = (erase->state == MTD_ERASE_FAILED) ? -EIO : 0;
	}
	return err;
}

static int concat_erase(struct mtd_info *mtd, struct erase_info *instr)
{
	struct mtd_concat *concat = CONCAT(mtd);
	struct mtd_info *subdev;
	int i, err;
	u_int32_t length, offset = 0;
	struct erase_info *erase;

	if (!(mtd->flags & MTD_WRITEABLE))
		return -EROFS;

	if (instr->addr > concat->mtd.size)
		return -EINVAL;

	if (instr->len + instr->addr > concat->mtd.size)
		return -EINVAL;

	/*
	 * Check for proper erase block alignment of the to-be-erased area.
	 * It is easier to do this based on the super device's erase
	 * region info rather than looking at each particular sub-device
	 * in turn.
	 */
	if (!concat->mtd.numeraseregions) {
		/* the easy case: device has uniform erase block size */
		if (instr->addr & (concat->mtd.erasesize - 1))
			return -EINVAL;
		if (instr->len & (concat->mtd.erasesize - 1))
			return -EINVAL;
	} else {
		/* device has variable erase size */
		struct mtd_erase_region_info *erase_regions =
		    concat->mtd.eraseregions;

		/*
		 * Find the erase region where the to-be-erased area begins:
		 */
		for (i = 0; i < concat->mtd.numeraseregions &&
		     instr->addr >= erase_regions[i].offset; i++) ;
		--i;

		/*
		 * Now erase_regions[i] is the region in which the
		 * to-be-erased area begins. Verify that the starting
		 * offset is aligned to this region's erase size:
		 */
		if (instr->addr & (erase_regions[i].erasesize - 1))
			return -EINVAL;

		/*
		 * now find the erase region where the to-be-erased area ends:
		 */
		for (; i < concat->mtd.numeraseregions &&
		     (instr->addr + instr->len) >= erase_regions[i].offset;
		     ++i) ;
		--i;
		/*
		 * check if the ending offset is aligned to this region's erase size
		 */
		if ((instr->addr + instr->len) & (erase_regions[i].erasesize - 1))
			return -EINVAL;
	}

	instr->fail_addr = MTD_FAIL_ADDR_UNKNOWN;

	/* make a local copy of instr to avoid modifying the caller's struct */
	erase = kmalloc(sizeof (struct erase_info), GFP_KERNEL);

	if (!erase)
		return -ENOMEM;

	*erase = *instr;
	length = instr->len;

	/*
	 * find the subdevice where the to-be-erased area begins, adjust
	 * starting offset to be relative to the subdevice start
	 */
	for (i = 0; i < concat->num_subdev; i++) {
		subdev = concat->subdev[i];
		if (subdev->size <= erase->addr) {
			erase->addr -= subdev->size;
			offset += subdev->size;
		} else {
			break;
		}
	}

	/* must never happen since size limit has been verified above */
	BUG_ON(i >= concat->num_subdev);

	/* now do the erase: */
	err = 0;
	for (; length > 0; i++) {
		/* loop for all subdevices affected by this request */
		subdev = concat->subdev[i];	/* get current subdevice */

		/* limit length to subdevice's size: */
		if (erase->addr + length > subdev->size)
			erase->len = subdev->size - erase->addr;
		else
			erase->len = length;

		if (!(subdev->flags & MTD_WRITEABLE)) {
			err = -EROFS;
			break;
		}
		length -= erase->len;
		if ((err = concat_dev_erase(subdev, erase))) {
			/* sanity check: should never happen since
			 * block alignment has been checked above */
			BUG_ON(err == -EINVAL);
			if (erase->fail_addr != MTD_FAIL_ADDR_UNKNOWN)
				instr->fail_addr = erase->fail_addr + offset;
			break;
		}
		/*
		 * erase->addr specifies the offset of the area to be
		 * erased *within the current subdevice*. It can be
		 * non-zero only the first time through this loop, i.e.
		 * for the first subdevice where blocks need to be erased.
		 * All the following erases must begin at the start of the
		 * current subdevice, i.e. at offset zero.
		 */
		erase->addr = 0;
		offset += subdev->size;
	}
	instr->state = erase->state;
	kfree(erase);
	if (err)
		return err;

	if (instr->callback)
		instr->callback(instr);
	return 0;
}

static int concat_lock(struct mtd_info *mtd, loff_t ofs, size_t len)
{
	struct mtd_concat *concat = CONCAT(mtd);
	int i, err = -EINVAL;

	if ((len + ofs) > mtd->size)
		return -EINVAL;

	for (i = 0; i < concat->num_subdev; i++) {
		struct mtd_info *subdev = concat->subdev[i];
		size_t size;

		if (ofs >= subdev->size) {
			size = 0;
			ofs -= subdev->size;
			continue;
		}
		if (ofs + len > subdev->size)
			size = subdev->size - ofs;
		else
			size = len;

		err = subdev->lock(subdev, ofs, size);

		if (err)
			break;

		len -= size;
		if (len == 0)
			break;

		err = -EINVAL;
		ofs = 0;
	}

	return err;
}

static int concat_unlock(struct mtd_info *mtd, loff_t ofs, size_t len)
{
	struct mtd_concat *concat = CONCAT(mtd);
	int i, err = 0;

	if ((len + ofs) > mtd->size)
		return -EINVAL;

	for (i = 0; i < concat->num_subdev; i++) {
		struct mtd_info *subdev = concat->subdev[i];
		size_t size;

		if (ofs >= subdev->size) {
			size = 0;
			ofs -= subdev->size;
			continue;
		}
		if (ofs + len > subdev->size)
			size = subdev->size - ofs;
		else
			size = len;

		err = subdev->unlock(subdev, ofs, size);

		if (err)
			break;

		len -= size;
		if (len == 0)
			break;

		err = -EINVAL;
		ofs = 0;
	}

	return err;
}

static void concat_sync(struct mtd_info *mtd)
{
	struct mtd_concat *concat = CONCAT(mtd);
	int i;

	for (i = 0; i < concat->num_subdev; i++) {
		struct mtd_info *subdev = concat->subdev[i];
		subdev->sync(subdev);
	}
}

static int concat_suspend(struct mtd_info *mtd)
{
	struct mtd_concat *concat = CONCAT(mtd);
	int i, rc = 0;

	for (i = 0; i < concat->num_subdev; i++) {
		struct mtd_info *subdev = concat->subdev[i];
		if ((rc = subdev->suspend(subdev)) < 0)
			return rc;
	}
	return rc;
}

static void concat_resume(struct mtd_info *mtd)
{
	struct mtd_concat *concat = CONCAT(mtd);
	int i;

	for (i = 0; i < concat->num_subdev; i++) {
		struct mtd_info *subdev = concat->subdev[i];
		subdev->resume(subdev);
	}
}

static int concat_block_isbad(struct mtd_info *mtd, loff_t ofs)
{
	struct mtd_concat *concat = CONCAT(mtd);
	int i, res = 0;

	if (!concat->subdev[0]->block_isbad)
		return res;

	if (ofs > mtd->size)
		return -EINVAL;

	for (i = 0; i < concat->num_subdev; i++) {
		struct mtd_info *subdev = concat->subdev[i];

		if (ofs >= subdev->size) {
			ofs -= subdev->size;
			continue;
		}

		res = subdev->block_isbad(subdev, ofs);
		break;
	}

	return res;
}

static int concat_block_markbad(struct mtd_info *mtd, loff_t ofs)
{
	struct mtd_concat *concat = CONCAT(mtd);
	int i, err = -EINVAL;

	if (!concat->subdev[0]->block_markbad)
		return 0;

	if (ofs > mtd->size)
		return -EINVAL;

	for (i = 0; i < concat->num_subdev; i++) {
		struct mtd_info *subdev = concat->subdev[i];

		if (ofs >= subdev->size) {
			ofs -= subdev->size;
			continue;
		}

		err = subdev->block_markbad(subdev, ofs);
		if (!err)
			mtd->ecc_stats.badblocks++;
		break;
	}

	return err;
}

/*
 * This function constructs a virtual MTD device by concatenating
 * num_devs MTD devices. It returns a pointer to the new device
 * object on success, or NULL on failure. This function does _not_
 * register any devices: this is the caller's responsibility.
 */
struct mtd_info *mtd_concat_create(struct mtd_info *subdev[],	/* subdevices to concatenate */
				   int num_devs,	/* number of subdevices */
				   char *name)	/* name for the new device */
{
	int i;
	size_t size;
	struct mtd_concat *concat;
	u_int32_t max_erasesize, curr_erasesize;
	int num_erase_region;

	printk(KERN_NOTICE "Concatenating MTD devices:\n");
	for (i = 0; i < num_devs; i++)
		printk(KERN_NOTICE "(%d): \"%s\"\n", i, subdev[i]->name);
	printk(KERN_NOTICE "into device \"%s\"\n", name);

	/* allocate the device structure */
	size = SIZEOF_STRUCT_MTD_CONCAT(num_devs);
	concat = kzalloc(size, GFP_KERNEL);
	if (!concat) {
		printk
		    ("memory allocation error while creating concatenated device \"%s\"\n",
		     name);
		return NULL;
	}
	concat->subdev = (struct mtd_info **) (concat + 1);

	/*
	 * Set up the new "super" device's MTD object structure, check for
	 * incompatibilities between the subdevices.
	 */
	concat->mtd.type = subdev[0]->type;
	concat->mtd.flags = subdev[0]->flags;
	concat->mtd.size = subdev[0]->size;
	concat->mtd.erasesize = subdev[0]->erasesize;
	concat->mtd.writesize = subdev[0]->writesize;
	concat->mtd.subpage_sft = subdev[0]->subpage_sft;
	concat->mtd.oobsize = subdev[0]->oobsize;
	concat->mtd.oobavail = subdev[0]->oobavail;
	if (subdev[0]->writev)
		concat->mtd.writev = concat_writev;
	if (subdev[0]->read_oob)
		concat->mtd.read_oob = concat_read_oob;
	if (subdev[0]->write_oob)
		concat->mtd.write_oob = concat_write_oob;
	if (subdev[0]->block_isbad)
		concat->mtd.block_isbad = concat_block_isbad;
	if (subdev[0]->block_markbad)
		concat->mtd.block_markbad = concat_block_markbad;

	concat->mtd.ecc_stats.badblocks = subdev[0]->ecc_stats.badblocks;

	concat->subdev[0] = subdev[0];

	for (i = 1; i < num_devs; i++) {
		if (concat->mtd.type != subdev[i]->type) {
			kfree(concat);
			printk("Incompatible device type on \"%s\"\n",
			       subdev[i]->name);
			return NULL;
		}
		if (concat->mtd.flags != subdev[i]->flags) {
			/*
			 * Expect all flags except MTD_WRITEABLE to be
			 * equal on all subdevices.
			 */
			if ((concat->mtd.flags ^ subdev[i]->flags) &
			    ~MTD_WRITEABLE) {
				kfree(concat);
				printk("Incompatible device flags on \"%s\"\n",
				       subdev[i]->name);
				return NULL;
			} else
				/* if writeable attribute differs,
				   make super device writeable */
				concat->mtd.flags |=
				    subdev[i]->flags & MTD_WRITEABLE;
		}
		concat->mtd.size += subdev[i]->size;
		concat->mtd.ecc_stats.badblocks +=
			subdev[i]->ecc_stats.badblocks;
		if (concat->mtd.writesize != subdev[i]->writesize ||
		    concat->mtd.subpage_sft != subdev[i]->subpage_sft ||
		    concat->mtd.oobsize != subdev[i]->oobsize ||
		    !concat->mtd.read_oob != !subdev[i]->read_oob ||
		    !concat->mtd.write_oob != !subdev[i]->write_oob) {
			kfree(concat);
			printk("Incompatible OOB or ECC data on \"%s\"\n",
			       subdev[i]->name);
			return NULL;
		}
		concat->subdev[i] = subdev[i];
	}

	concat->mtd.ecclayout = subdev[0]->ecclayout;

	concat->num_subdev = num_devs;
	concat->mtd.name = name;

	concat->mtd.erase = concat_erase;
	concat->mtd.read = concat_read;
	concat->mtd.write = concat_write;
	concat->mtd.sync = concat_sync;
	concat->mtd.lock = concat_lock;
	concat->mtd.unlock = concat_unlock;
	concat->mtd.suspend = concat_suspend;
	concat->mtd.resume = concat_resume;

	/*
	 * Combine the erase block size info of the subdevices:
	 *
	 * first, walk the map of the new device and see how
	 * many changes in erase size we have
	 */
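	/*
	 * For instance (hypothetical layout): two chips with a uniform
	 * 64 KiB erase size followed by one with 128 KiB give
	 * num_erase_region == 2, so the super device is described by
	 * two mtd_erase_region_info entries below.
	 */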
	max_erasesize = curr_erasesize = subdev[0]->erasesize;
	num_erase_region = 1;
	for (i = 0; i < num_devs; i++) {
		if (subdev[i]->numeraseregions == 0) {
			/* current subdevice has uniform erase size */
			if (subdev[i]->erasesize != curr_erasesize) {
				/* if it differs from the last subdevice's erase size, count it */
				++num_erase_region;
				curr_erasesize = subdev[i]->erasesize;
				if (curr_erasesize > max_erasesize)
					max_erasesize = curr_erasesize;
			}
		} else {
			/* current subdevice has variable erase size */
			int j;
			for (j = 0; j < subdev[i]->numeraseregions; j++) {

				/* walk the list of erase regions, count any changes */
				if (subdev[i]->eraseregions[j].erasesize !=
				    curr_erasesize) {
					++num_erase_region;
					curr_erasesize =
					    subdev[i]->eraseregions[j].
					    erasesize;
					if (curr_erasesize > max_erasesize)
						max_erasesize = curr_erasesize;
				}
			}
		}
	}

	if (num_erase_region == 1) {
		/*
		 * All subdevices have the same uniform erase size.
		 * This is easy:
		 */
		concat->mtd.erasesize = curr_erasesize;
		concat->mtd.numeraseregions = 0;
	} else {
		/*
		 * erase block size varies across the subdevices: allocate
		 * space to store the data describing the variable erase regions
		 */
		struct mtd_erase_region_info *erase_region_p;
		u_int32_t begin, position;

		concat->mtd.erasesize = max_erasesize;
		concat->mtd.numeraseregions = num_erase_region;
		concat->mtd.eraseregions = erase_region_p =
		    kmalloc(num_erase_region *
			    sizeof (struct mtd_erase_region_info), GFP_KERNEL);
		if (!erase_region_p) {
			kfree(concat);
			printk
			    ("memory allocation error while creating erase region list"
			     " for device \"%s\"\n", name);
			return NULL;
		}

		/*
		 * walk the map of the new device once more and fill in
		 * the erase region info:
		 */
		curr_erasesize = subdev[0]->erasesize;
		begin = position = 0;
		for (i = 0; i < num_devs; i++) {
			if (subdev[i]->numeraseregions == 0) {
				/* current subdevice has uniform erase size */
				if (subdev[i]->erasesize != curr_erasesize) {
					/*
					 * fill in an mtd_erase_region_info structure for the area
					 * we have walked so far:
					 */
					erase_region_p->offset = begin;
					erase_region_p->erasesize =
					    curr_erasesize;
					erase_region_p->numblocks =
					    (position - begin) / curr_erasesize;
					begin = position;

					curr_erasesize = subdev[i]->erasesize;
					++erase_region_p;
				}
				position += subdev[i]->size;
			} else {
				/* current subdevice has variable erase size */
				int j;
				for (j = 0; j < subdev[i]->numeraseregions; j++) {
					/* walk the list of erase regions, count any changes */
					if (subdev[i]->eraseregions[j].
					    erasesize != curr_erasesize) {
						erase_region_p->offset = begin;
						erase_region_p->erasesize =
						    curr_erasesize;
						erase_region_p->numblocks =
						    (position -
						     begin) / curr_erasesize;
						begin = position;

						curr_erasesize =
						    subdev[i]->eraseregions[j].
						    erasesize;
						++erase_region_p;
					}
					position +=
					    subdev[i]->eraseregions[j].
					    numblocks * curr_erasesize;
				}
			}
		}
		/* Now write the final entry */
		erase_region_p->offset = begin;
		erase_region_p->erasesize = curr_erasesize;
		erase_region_p->numblocks = (position - begin) / curr_erasesize;
	}

	return &concat->mtd;
}
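
/*
 * Usage sketch (hypothetical board code; the chip pointers and error
 * handling are illustrative, not part of this file): concatenate two
 * probed chips into one device and register the result.
 *
 *	struct mtd_info *parts[2] = { flash0_mtd, flash1_mtd };
 *	struct mtd_info *merged;
 *
 *	merged = mtd_concat_create(parts, 2, "board-flash");
 *	if (merged)
 *		add_mtd_device(merged);
 *	...
 *	mtd_concat_destroy(merged);	// after del_mtd_device()
 */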

/*
 * This function destroys an MTD object obtained from mtd_concat_create()
 */

void mtd_concat_destroy(struct mtd_info *mtd)
{
	struct mtd_concat *concat = CONCAT(mtd);
	if (concat->mtd.numeraseregions)
		kfree(concat->mtd.eraseregions);
	kfree(concat);
}

EXPORT_SYMBOL(mtd_concat_create);
EXPORT_SYMBOL(mtd_concat_destroy);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Robert Kaiser <rkaiser@sysgo.de>");
MODULE_DESCRIPTION("Generic support for concatenating MTD devices");