/*
 * Copyright © 2009 - Maxim Levitsky
 * SmartMedia/xD translation layer
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/hdreg.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/sysfs.h>
#include <linux/bitops.h>
#include <linux/mtd/nand_ecc.h>
#include "nand/sm_common.h"
#include "sm_ftl.h"


struct workqueue_struct *cache_flush_workqueue;

static int cache_timeout = 1000;
module_param(cache_timeout, int, S_IRUGO);
MODULE_PARM_DESC(cache_timeout,
	"Timeout (in ms) for cache flush (1000 ms default)");

static int debug;
module_param(debug, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(debug, "Debug level (0-2)");

/* ------------------- sysfs attributes ----------------------------------- */
struct sm_sysfs_attribute {
	struct device_attribute dev_attr;
	char *data;
	int len;
};

ssize_t sm_attr_show(struct device *dev, struct device_attribute *attr,
		     char *buf)
{
	struct sm_sysfs_attribute *sm_attr =
		container_of(attr, struct sm_sysfs_attribute, dev_attr);

	strncpy(buf, sm_attr->data, sm_attr->len);
	return sm_attr->len;
}


#define NUM_ATTRIBUTES 1
#define SM_CIS_VENDOR_OFFSET 0x59
struct attribute_group *sm_create_sysfs_attributes(struct sm_ftl *ftl)
{
	struct attribute_group *attr_group;
	struct attribute **attributes;
	struct sm_sysfs_attribute *vendor_attribute;

	int vendor_len = strnlen(ftl->cis_buffer + SM_CIS_VENDOR_OFFSET,
					SM_SMALL_PAGE - SM_CIS_VENDOR_OFFSET);

	/* +1 for the NUL terminator written below */
	char *vendor = kmalloc(vendor_len + 1, GFP_KERNEL);
	memcpy(vendor, ftl->cis_buffer + SM_CIS_VENDOR_OFFSET, vendor_len);
	vendor[vendor_len] = 0;

	/* Initialize sysfs attributes */
	vendor_attribute =
		kzalloc(sizeof(struct sm_sysfs_attribute), GFP_KERNEL);

	sysfs_attr_init(&vendor_attribute->dev_attr.attr);

	vendor_attribute->data = vendor;
	vendor_attribute->len = vendor_len;
	vendor_attribute->dev_attr.attr.name = "vendor";
	vendor_attribute->dev_attr.attr.mode = S_IRUGO;
	vendor_attribute->dev_attr.show = sm_attr_show;


	/* Create array of pointers to the attributes */
	attributes = kzalloc(sizeof(struct attribute *) * (NUM_ATTRIBUTES + 1),
								GFP_KERNEL);
	attributes[0] = &vendor_attribute->dev_attr.attr;

	/* Finally create the attribute group */
	attr_group = kzalloc(sizeof(struct attribute_group), GFP_KERNEL);
	attr_group->attrs = attributes;
	return attr_group;
}

void sm_delete_sysfs_attributes(struct sm_ftl *ftl)
{
	struct attribute **attributes = ftl->disk_attributes->attrs;
	int i;

	for (i = 0; attributes[i] ; i++) {

		struct device_attribute *dev_attr = container_of(attributes[i],
			struct device_attribute, attr);

		struct sm_sysfs_attribute *sm_attr =
			container_of(dev_attr,
				struct sm_sysfs_attribute, dev_attr);

		kfree(sm_attr->data);
		kfree(sm_attr);
	}

	kfree(ftl->disk_attributes->attrs);
	kfree(ftl->disk_attributes);
}


/* ----------------------- oob helpers -------------------------------------- */

static int sm_get_lba(uint8_t *lba)
{
	/* check fixed bits */
	if ((lba[0] & 0xF8) != 0x10)
		return -2;

	/* check parity - endianness doesn't matter */
	if (hweight16(*(uint16_t *)lba) & 1)
		return -2;

	return (lba[1] >> 1) | ((lba[0] & 0x07) << 7);
}


/*
 * Read the LBA associated with a block
 * returns -1, if block is erased
 * returns -2 if an error happens
 */
static int sm_read_lba(struct sm_oob *oob)
{
	static const uint32_t erased_pattern[4] = {
		0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF };

	uint16_t lba_test;
	int lba;

	/* First test for erased block */
	if (!memcmp(oob, erased_pattern, SM_OOB_SIZE))
		return -1;

	/* Now check if both copies of the LBA differ too much */
	lba_test = *(uint16_t *)oob->lba_copy1 ^ *(uint16_t *)oob->lba_copy2;
	if (lba_test && !is_power_of_2(lba_test))
		return -2;

	/* And read it */
	lba = sm_get_lba(oob->lba_copy1);

	if (lba == -2)
		lba = sm_get_lba(oob->lba_copy2);

	return lba;
}

static void sm_write_lba(struct sm_oob *oob, uint16_t lba)
{
	uint8_t tmp[2];

	WARN_ON(lba >= 1000);

	tmp[0] = 0x10 | ((lba >> 7) & 0x07);
	tmp[1] = (lba << 1) & 0xFF;

	if (hweight16(*(uint16_t *)tmp) & 0x01)
		tmp[1] |= 1;

	oob->lba_copy1[0] = oob->lba_copy2[0] = tmp[0];
	oob->lba_copy1[1] = oob->lba_copy2[1] = tmp[1];
}
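
/*
 * Worked example of the encoding above (illustration only): for lba == 300
 * the fixed bits give tmp[0] = 0x10 | 0x02 = 0x12 and
 * tmp[1] = (300 << 1) & 0xFF = 0x58. Those two bytes contain five set bits,
 * so the parity bit is added and tmp[1] becomes 0x59, making the total
 * weight even. sm_get_lba() reverses this: 0x12 passes the
 * (lba[0] & 0xF8) == 0x10 check, the 16-bit word now has six set bits
 * (even parity), and (0x59 >> 1) | ((0x12 & 0x07) << 7) == 300 again.
 */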


/* Make offset from parts */
static loff_t sm_mkoffset(struct sm_ftl *ftl, int zone, int block, int boffset)
{
	WARN_ON(boffset & (SM_SECTOR_SIZE - 1));
	WARN_ON(zone < 0 || zone >= ftl->zone_count);
	WARN_ON(block >= ftl->zone_size);
	WARN_ON(boffset >= ftl->block_size);

	if (block == -1)
		return -1;

	return (zone * SM_MAX_ZONE_SIZE + block) * ftl->block_size + boffset;
}

/* Breaks offset into parts */
static void sm_break_offset(struct sm_ftl *ftl, loff_t offset,
			    int *zone, int *block, int *boffset)
{
	*boffset = do_div(offset, ftl->block_size);
	*block = do_div(offset, ftl->max_lba);
	*zone = offset >= ftl->zone_count ? -1 : offset;
}
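
/*
 * Note that the two helpers above work on different address spaces:
 * sm_break_offset() splits a *logical* byte offset (sect_no << 9 from the
 * block layer) using max_lba logical blocks per zone, while sm_mkoffset()
 * builds a *physical* mtd address with a fixed stride of SM_MAX_ZONE_SIZE
 * blocks per zone; the LBA -> physical translation happens in between via
 * lba_to_phys_table. Rough example (assuming the >= 16 MiB geometry set up
 * below: 512-byte sectors, block_size = 32 * SM_SECTOR_SIZE = 16 KiB,
 * max_lba = 1000): logical sector 40000 is byte offset 20480000, which
 * breaks into boffset = 0, logical block 250, zone 1.
 */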
202
203/* ---------------------- low level IO ------------------------------------- */
204
205static int sm_correct_sector(uint8_t *buffer, struct sm_oob *oob)
206{
Maxim Levitsky7d17c022010-02-22 20:39:41 +0200207 uint8_t ecc[3];
208
209 __nand_calculate_ecc(buffer, SM_SMALL_PAGE, ecc);
210 if (__nand_correct_data(buffer, ecc, oob->ecc1, SM_SMALL_PAGE) < 0)
211 return -EIO;
212
213 buffer += SM_SMALL_PAGE;
214
215 __nand_calculate_ecc(buffer, SM_SMALL_PAGE, ecc);
216 if (__nand_correct_data(buffer, ecc, oob->ecc2, SM_SMALL_PAGE) < 0)
217 return -EIO;
Maxim Levitsky7d17c022010-02-22 20:39:41 +0200218 return 0;
219}
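
/*
 * sm_correct_sector() treats a sector as two SM_SMALL_PAGE halves
 * (256 bytes each, per sm_common.h), recomputes the 3-byte software
 * Hamming ECC for each half and lets __nand_correct_data() repair at most
 * a single-bit error per half against the ecc1/ecc2 copies stored in the
 * OOB. Anything beyond that returns -EIO, and the caller falls back to
 * the retry path in sm_read_sector() below.
 */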

/* Reads a sector + oob */
static int sm_read_sector(struct sm_ftl *ftl,
			  int zone, int block, int boffset,
			  uint8_t *buffer, struct sm_oob *oob)
{
	struct mtd_info *mtd = ftl->trans->mtd;
	struct mtd_oob_ops ops;
	struct sm_oob tmp_oob;
	int ret = -EIO;
	int try = 0;

	/* FTL can contain -1 entries that are by default filled with 0xFF */
	if (block == -1) {
		memset(buffer, 0xFF, SM_SECTOR_SIZE);
		return 0;
	}

	/* User might not need the oob, but we do for data verification */
	if (!oob)
		oob = &tmp_oob;

	ops.mode = ftl->smallpagenand ? MTD_OOB_RAW : MTD_OOB_PLACE;
	ops.ooboffs = 0;
	ops.ooblen = SM_OOB_SIZE;
	ops.oobbuf = (void *)oob;
	ops.len = SM_SECTOR_SIZE;
	ops.datbuf = buffer;

again:
	if (try++) {
		/* Avoid infinite recursion on CIS reads, sm_recheck_media
		   won't help anyway */
		if (zone == 0 && block == ftl->cis_block && boffset ==
			ftl->cis_boffset)
			return ret;

		/* Test if media is stable */
		if (try == 3 || sm_recheck_media(ftl))
			return ret;
	}

	/* Unfortunately, the oob read will _always_ succeed,
	   despite card removal..... */
	ret = mtd->read_oob(mtd, sm_mkoffset(ftl, zone, block, boffset), &ops);

	/* Test for unknown errors */
	if (ret != 0 && ret != -EUCLEAN && ret != -EBADMSG) {
		dbg("read of block %d at zone %d, failed due to error (%d)",
			block, zone, ret);
		goto again;
	}

	/* Do a basic test on the oob, to guard against returned garbage */
	if (oob->reserved != 0xFFFFFFFF && !is_power_of_2(~oob->reserved))
		goto again;

	/* This should never happen, unless there is a bug in the mtd driver */
	WARN_ON(ops.oobretlen != SM_OOB_SIZE);
	WARN_ON(buffer && ops.retlen != SM_SECTOR_SIZE);

	if (!buffer)
		return 0;

	/* Test if sector is marked as bad */
	if (!sm_sector_valid(oob)) {
		dbg("read of block %d at zone %d, failed because it is marked"
			" as bad", block, zone);
		goto again;
	}

	/* Test ECC */
	if (ret == -EBADMSG ||
		(ftl->smallpagenand && sm_correct_sector(buffer, oob))) {

		dbg("read of block %d at zone %d, failed due to ECC error",
			block, zone);
		goto again;
	}

	return 0;
}
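
/*
 * Retry policy of sm_read_sector(): the try counter allows one retry of a
 * failed read (it bails out on its third pass through the "again" label),
 * with sm_recheck_media() re-reading the CIS before the retry to detect
 * card removal or an unstable card. Reads of the CIS sector itself are
 * never retried, since sm_recheck_media() would then recurse into the
 * very same read.
 */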

/* Writes a sector to media */
static int sm_write_sector(struct sm_ftl *ftl,
			   int zone, int block, int boffset,
			   uint8_t *buffer, struct sm_oob *oob)
{
	struct mtd_oob_ops ops;
	struct mtd_info *mtd = ftl->trans->mtd;
	int ret;

	BUG_ON(ftl->readonly);

	if (zone == 0 && (block == ftl->cis_block || block == 0)) {
		dbg("attempted to write the CIS!");
		return -EIO;
	}

	if (ftl->unstable)
		return -EIO;

	ops.mode = ftl->smallpagenand ? MTD_OOB_RAW : MTD_OOB_PLACE;
	ops.len = SM_SECTOR_SIZE;
	ops.datbuf = buffer;
	ops.ooboffs = 0;
	ops.ooblen = SM_OOB_SIZE;
	ops.oobbuf = (void *)oob;

	ret = mtd->write_oob(mtd, sm_mkoffset(ftl, zone, block, boffset), &ops);

	/* Now we assume that hardware will catch write bitflip errors */
	/* If you are paranoid, use CONFIG_MTD_NAND_VERIFY_WRITE */

	if (ret) {
		dbg("write to block %d at zone %d, failed with error %d",
			block, zone, ret);

		sm_recheck_media(ftl);
		return ret;
	}

	/* This should never happen, unless there is a bug in the driver */
	WARN_ON(ops.oobretlen != SM_OOB_SIZE);
	WARN_ON(buffer && ops.retlen != SM_SECTOR_SIZE);

	return 0;
}

/* ------------------------ block IO ------------------------------------- */

/* Write a block using data, lba and the invalid sector bitmap */
static int sm_write_block(struct sm_ftl *ftl, uint8_t *buf,
			  int zone, int block, int lba,
			  unsigned long invalid_bitmap)
{
	struct sm_oob oob;
	int boffset;
	int retry = 0;

	/* Initialize the oob with requested values */
	memset(&oob, 0xFF, SM_OOB_SIZE);
	sm_write_lba(&oob, lba);
restart:
	if (ftl->unstable)
		return -EIO;

	for (boffset = 0; boffset < ftl->block_size;
				boffset += SM_SECTOR_SIZE) {

		oob.data_status = 0xFF;

		if (test_bit(boffset / SM_SECTOR_SIZE, &invalid_bitmap)) {

			sm_printk("sector %d of block at LBA %d of zone %d"
				" couldn't be read, marking it as invalid",
				boffset / SM_SECTOR_SIZE, lba, zone);

			oob.data_status = 0;
		}

		if (ftl->smallpagenand) {
			__nand_calculate_ecc(buf + boffset,
					SM_SMALL_PAGE, oob.ecc1);

			__nand_calculate_ecc(buf + boffset + SM_SMALL_PAGE,
					SM_SMALL_PAGE, oob.ecc2);
		}
		if (!sm_write_sector(ftl, zone, block, boffset,
							buf + boffset, &oob))
			continue;

		if (!retry) {

			/* If the write fails, try to erase the block */
			/* This is safe, because we never write to blocks
			   that contain valuable data.
			   This is intended to repair blocks that are marked
			   as erased, but aren't fully erased */

			if (sm_erase_block(ftl, zone, block, 0))
				return -EIO;

			retry = 1;
			goto restart;
		} else {
			sm_mark_block_bad(ftl, zone, block);
			return -EIO;
		}
	}
	return 0;
}


/* Mark a whole block as bad. */
static void sm_mark_block_bad(struct sm_ftl *ftl, int zone, int block)
{
	struct sm_oob oob;
	int boffset;

	memset(&oob, 0xFF, SM_OOB_SIZE);
	oob.block_status = 0xF0;

	if (ftl->unstable)
		return;

	if (sm_recheck_media(ftl))
		return;

	sm_printk("marking block %d of zone %d as bad", block, zone);

	/* We aren't checking the return value, because we don't care */
	/* This also fails on fake xD cards, but I guess these won't expose
	   any bad blocks until they fail completely */
	for (boffset = 0; boffset < ftl->block_size; boffset += SM_SECTOR_SIZE)
		sm_write_sector(ftl, zone, block, boffset, NULL, &oob);
}

/*
 * Erase a block within a zone
 * If the erase succeeds, it updates the free block fifo,
 * otherwise marks the block as bad
 */
static int sm_erase_block(struct sm_ftl *ftl, int zone_num, uint16_t block,
			  int put_free)
{
	struct ftl_zone *zone = &ftl->zones[zone_num];
	struct mtd_info *mtd = ftl->trans->mtd;
	struct erase_info erase;

	erase.mtd = mtd;
	erase.callback = sm_erase_callback;
	erase.addr = sm_mkoffset(ftl, zone_num, block, 0);
	erase.len = ftl->block_size;
	erase.priv = (u_long)ftl;

	if (ftl->unstable)
		return -EIO;

	BUG_ON(ftl->readonly);

	if (zone_num == 0 && (block == ftl->cis_block || block == 0)) {
		sm_printk("attempted to erase the CIS!");
		return -EIO;
	}

	if (mtd->erase(mtd, &erase)) {
		sm_printk("erase of block %d in zone %d failed",
			block, zone_num);
		goto error;
	}

	if (erase.state == MTD_ERASE_PENDING)
		wait_for_completion(&ftl->erase_completion);

	if (erase.state != MTD_ERASE_DONE) {
		sm_printk("erase of block %d in zone %d failed after wait",
			block, zone_num);
		goto error;
	}

	if (put_free)
		kfifo_in(&zone->free_sectors,
			(const unsigned char *)&block, sizeof(block));

	return 0;
error:
	sm_mark_block_bad(ftl, zone_num, block);
	return -EIO;
}

static void sm_erase_callback(struct erase_info *self)
{
	struct sm_ftl *ftl = (struct sm_ftl *)self->priv;
	complete(&ftl->erase_completion);
}
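
/*
 * mtd->erase() may complete asynchronously: sm_erase_block() passes
 * sm_erase_callback() in erase_info, and when the call returns with
 * MTD_ERASE_PENDING it sleeps on ftl->erase_completion until the mtd
 * driver invokes the callback, which does nothing but complete() it.
 * Only then is erase.state checked for MTD_ERASE_DONE.
 */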

/* Thoroughly test that a block is valid. */
static int sm_check_block(struct sm_ftl *ftl, int zone, int block)
{
	int boffset;
	struct sm_oob oob;
	int lbas[] = { -3, 0, 0, 0 };
	int i = 0;
	int test_lba;


	/* First just check that the block doesn't look fishy */
	/* Only blocks that are valid or are sliced in two parts, are
	   accepted */
	for (boffset = 0; boffset < ftl->block_size;
					boffset += SM_SECTOR_SIZE) {

		/* This shouldn't happen anyway */
		if (sm_read_sector(ftl, zone, block, boffset, NULL, &oob))
			return -2;

		test_lba = sm_read_lba(&oob);

		if (lbas[i] != test_lba)
			lbas[++i] = test_lba;

		/* If we found three different LBAs, something is fishy */
		if (i == 3)
			return -EIO;
	}

	/* If the block is sliced (partially erased usually) erase it */
	if (i == 2) {
		sm_erase_block(ftl, zone, block, 1);
		return 1;
	}

	return 0;
}
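
/*
 * The lbas[] array above counts runs of distinct LBA values across the
 * sectors of the block. lbas[0] is primed with -3, a value sm_read_lba()
 * can never return, so the first sector always starts run number one.
 * A consistent block therefore ends the scan with i == 1; a block that is
 * "sliced" in two parts (e.g. an interrupted write: some sectors carry an
 * LBA, the rest are erased) ends with i == 2 and gets erased; three
 * different values are treated as corruption (-EIO).
 */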

/* ----------------- media scanning --------------------------------- */
static const struct chs_entry chs_table[] = {
	{ 1,    125,  4,  4  },
	{ 2,    125,  4,  8  },
	{ 4,    250,  4,  8  },
	{ 8,    250,  4,  16 },
	{ 16,   500,  4,  16 },
	{ 32,   500,  8,  16 },
	{ 64,   500,  8,  32 },
	{ 128,  500,  16, 32 },
	{ 256,  1000, 16, 32 },
	{ 512,  1015, 32, 63 },
	{ 1024, 985,  33, 63 },
	{ 2048, 985,  33, 63 },
	{ 0 },
};
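
/*
 * Each chs_table entry is { size in MiB, cylinders, heads, sectors } and
 * provides the fake disk geometry reported through sm_getgeo(). For the
 * sizes up to 256 MiB the product works out to exactly the logical
 * capacity the FTL exports: e.g. the 16 MiB entry gives
 * 500 * 4 * 16 = 32000 sectors of 512 bytes, i.e. 1000 blocks of
 * 32 sectors, with the remaining physical blocks of the zone kept as
 * spares.
 */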


static const uint8_t cis_signature[] = {
	0x01, 0x03, 0xD9, 0x01, 0xFF, 0x18, 0x02, 0xDF, 0x01, 0x20
};
/* Find out media parameters.
 * This ideally has to be based on nand id, but for now device size is enough */
int sm_get_media_info(struct sm_ftl *ftl, struct mtd_info *mtd)
{
	int i;
	int size_in_megs = mtd->size / (1024 * 1024);

	ftl->readonly = mtd->type == MTD_ROM;

	/* Manual settings for very old devices */
	ftl->zone_count = 1;
	ftl->smallpagenand = 0;

	switch (size_in_megs) {
	case 1:
		/* 1 MiB flash/rom SmartMedia card (256 byte pages) */
		ftl->zone_size = 256;
		ftl->max_lba = 250;
		ftl->block_size = 8 * SM_SECTOR_SIZE;
		ftl->smallpagenand = 1;

		break;
	case 2:
		/* 2 MiB flash SmartMedia (256 byte pages) */
		if (mtd->writesize == SM_SMALL_PAGE) {
			ftl->zone_size = 512;
			ftl->max_lba = 500;
			ftl->block_size = 8 * SM_SECTOR_SIZE;
			ftl->smallpagenand = 1;
		/* 2 MiB rom SmartMedia */
		} else {

			if (!ftl->readonly)
				return -ENODEV;

			ftl->zone_size = 256;
			ftl->max_lba = 250;
			ftl->block_size = 16 * SM_SECTOR_SIZE;
		}
		break;
	case 4:
		/* 4 MiB flash/rom SmartMedia device */
		ftl->zone_size = 512;
		ftl->max_lba = 500;
		ftl->block_size = 16 * SM_SECTOR_SIZE;
		break;
	case 8:
		/* 8 MiB flash/rom SmartMedia device */
		ftl->zone_size = 1024;
		ftl->max_lba = 1000;
		ftl->block_size = 16 * SM_SECTOR_SIZE;
	}

	/* Minimum xD size is 16 MiB. Also, all xD cards have standard zone
	   sizes. SmartMedia cards exist up to 128 MiB and have the same
	   layout */
	if (size_in_megs >= 16) {
		ftl->zone_count = size_in_megs / 16;
		ftl->zone_size = 1024;
		ftl->max_lba = 1000;
		ftl->block_size = 32 * SM_SECTOR_SIZE;
	}

	/* Test for proper write, erase and oob sizes */
	if (mtd->erasesize > ftl->block_size)
		return -ENODEV;

	if (mtd->writesize > SM_SECTOR_SIZE)
		return -ENODEV;

	if (ftl->smallpagenand && mtd->oobsize < SM_SMALL_OOB_SIZE)
		return -ENODEV;

	if (!ftl->smallpagenand && mtd->oobsize < SM_OOB_SIZE)
		return -ENODEV;

	/* We use these functions for IO */
	if (!mtd->read_oob || !mtd->write_oob)
		return -ENODEV;

	/* Find geometry information */
	for (i = 0 ; i < ARRAY_SIZE(chs_table) ; i++) {
		if (chs_table[i].size == size_in_megs) {
			ftl->cylinders = chs_table[i].cyl;
			ftl->heads = chs_table[i].head;
			ftl->sectors = chs_table[i].sec;
			return 0;
		}
	}

	sm_printk("media has unknown size : %dMiB", size_in_megs);
	ftl->cylinders = 985;
	ftl->heads = 33;
	ftl->sectors = 63;
	return 0;
}
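
/*
 * Example of the resulting layout (values from the code above): a 64 MiB
 * xD card gets zone_count = 4 zones of 1024 physical blocks, of which at
 * most max_lba = 1000 hold data at any time; each block is
 * 32 * SM_SECTOR_SIZE = 16 KiB, so the FTL exports
 * 4 * 1000 * 16 KiB = 64000 KiB, while the remaining 24 blocks per zone
 * serve as spares for wear leveling and bad block replacement.
 */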

/* Validate the CIS */
static int sm_read_cis(struct sm_ftl *ftl)
{
	struct sm_oob oob;

	if (sm_read_sector(ftl,
		0, ftl->cis_block, ftl->cis_boffset, ftl->cis_buffer, &oob))
		return -EIO;

	if (!sm_sector_valid(&oob) || !sm_block_valid(&oob))
		return -EIO;

	if (!memcmp(ftl->cis_buffer + ftl->cis_page_offset,
			cis_signature, sizeof(cis_signature))) {
		return 0;
	}

	return -EIO;
}

/* Scan the media for the CIS */
static int sm_find_cis(struct sm_ftl *ftl)
{
	struct sm_oob oob;
	int block, boffset;
	int block_found = 0;
	int cis_found = 0;

	/* Search for the first valid block */
	for (block = 0 ; block < ftl->zone_size - ftl->max_lba ; block++) {

		if (sm_read_sector(ftl, 0, block, 0, NULL, &oob))
			continue;

		if (!sm_block_valid(&oob))
			continue;
		block_found = 1;
		break;
	}

	if (!block_found)
		return -EIO;

	/* Search for the first valid sector in this block */
	for (boffset = 0 ; boffset < ftl->block_size;
					boffset += SM_SECTOR_SIZE) {

		if (sm_read_sector(ftl, 0, block, boffset, NULL, &oob))
			continue;

		if (!sm_sector_valid(&oob))
			continue;
		break;
	}

	if (boffset == ftl->block_size)
		return -EIO;

	ftl->cis_block = block;
	ftl->cis_boffset = boffset;
	ftl->cis_page_offset = 0;

	cis_found = !sm_read_cis(ftl);

	if (!cis_found) {
		ftl->cis_page_offset = SM_SMALL_PAGE;
		cis_found = !sm_read_cis(ftl);
	}

	if (cis_found) {
		dbg("CIS block found at offset %x",
			block * ftl->block_size +
				boffset + ftl->cis_page_offset);
		return 0;
	}
	return -EIO;
}

/* Basic test to determine if the underlying mtd device is functional */
static int sm_recheck_media(struct sm_ftl *ftl)
{
	if (sm_read_cis(ftl)) {

		if (!ftl->unstable) {
			sm_printk("media unstable, not allowing writes");
			ftl->unstable = 1;
		}
		return -EIO;
	}
	return 0;
}

/* Initialize an FTL zone */
static int sm_init_zone(struct sm_ftl *ftl, int zone_num)
{
	struct ftl_zone *zone = &ftl->zones[zone_num];
	struct sm_oob oob;
	uint16_t block;
	int lba;
	int i = 0;
	int len;

	dbg("initializing zone %d", zone_num);

	/* Allocate memory for the FTL table */
	zone->lba_to_phys_table = kmalloc(ftl->max_lba * 2, GFP_KERNEL);

	if (!zone->lba_to_phys_table)
		return -ENOMEM;
	memset(zone->lba_to_phys_table, -1, ftl->max_lba * 2);


	/* Allocate memory for the free sectors FIFO */
	if (kfifo_alloc(&zone->free_sectors, ftl->zone_size * 2, GFP_KERNEL)) {
		kfree(zone->lba_to_phys_table);
		return -ENOMEM;
	}

	/* Now scan the zone */
	for (block = 0 ; block < ftl->zone_size ; block++) {

		/* Skip blocks up to and including the CIS */
		if (zone_num == 0 && block <= ftl->cis_block)
			continue;

		/* Read the oob of the first sector */
		if (sm_read_sector(ftl, zone_num, block, 0, NULL, &oob))
			return -EIO;

		/* Test to see if the block is erased. It is enough to test
		   the first sector, because erase happens in one shot */
		if (sm_block_erased(&oob)) {
			kfifo_in(&zone->free_sectors,
				(unsigned char *)&block, 2);
			continue;
		}

		/* If the block is marked as bad, skip it */
		/* This assumes we can trust the first sector */
		/* However the way the block valid status is defined ensures
		   a very low probability of failure here */
		if (!sm_block_valid(&oob)) {
			dbg("PH %04d <-> <marked bad>", block);
			continue;
		}


		lba = sm_read_lba(&oob);

		/* An invalid LBA means that the block is damaged. */
		/* We can try to erase it, or mark it as bad, but
		   lets leave that to a recovery application */
		if (lba == -2 || lba >= ftl->max_lba) {
			dbg("PH %04d <-> LBA %04d(bad)", block, lba);
			continue;
		}


		/* If there is no collision,
		   just put the sector in the FTL table */
		if (zone->lba_to_phys_table[lba] < 0) {
			dbg_verbose("PH %04d <-> LBA %04d", block, lba);
			zone->lba_to_phys_table[lba] = block;
			continue;
		}

		sm_printk("collision"
			" of LBA %d between blocks %d and %d in zone %d",
			lba, zone->lba_to_phys_table[lba], block, zone_num);

		/* Test that this block is valid */
		if (sm_check_block(ftl, zone_num, block))
			continue;

		/* Now test the old block */
		if (sm_check_block(ftl, zone_num,
					zone->lba_to_phys_table[lba])) {
			zone->lba_to_phys_table[lba] = block;
			continue;
		}

		/* If both blocks are valid and share the same LBA, it means
		   that they hold different versions of the same data. It is
		   not known which is more recent, thus just erase one of them
		*/
		sm_printk("both blocks are valid, erasing the latter");
		sm_erase_block(ftl, zone_num, block, 1);
	}

	dbg("zone initialized");
	zone->initialized = 1;

	/* No free sectors means that the zone is heavily damaged; writes won't
	   work, but it can still be (partially) read */
	if (!kfifo_len(&zone->free_sectors)) {
		sm_printk("no free blocks in zone %d", zone_num);
		return 0;
	}

	/* Randomize the first block we write to */
	get_random_bytes(&i, 2);
	i %= (kfifo_len(&zone->free_sectors) / 2);

	while (i--) {
		len = kfifo_out(&zone->free_sectors,
			(unsigned char *)&block, 2);
		WARN_ON(len != 2);
		kfifo_in(&zone->free_sectors, (const unsigned char *)&block, 2);
	}
	return 0;
}
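
/*
 * The loop above implements a very simple form of wear leveling: a random
 * number of entries is popped from the free-block FIFO and pushed right
 * back, which rotates the queue so that the first free block handed out
 * after each mount differs, instead of always hammering the same blocks
 * at the front of the free list.
 */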

/* Get and automatically initialize an FTL mapping for one zone */
struct ftl_zone *sm_get_zone(struct sm_ftl *ftl, int zone_num)
{
	struct ftl_zone *zone;
	int error;

	BUG_ON(zone_num >= ftl->zone_count);
	zone = &ftl->zones[zone_num];

	if (!zone->initialized) {
		error = sm_init_zone(ftl, zone_num);

		if (error)
			return ERR_PTR(error);
	}
	return zone;
}


/* ----------------- cache handling ------------------------------------------*/

/* Initialize the one block cache */
void sm_cache_init(struct sm_ftl *ftl)
{
	ftl->cache_data_invalid_bitmap = 0xFFFFFFFF;
	ftl->cache_clean = 1;
	ftl->cache_zone = -1;
	ftl->cache_block = -1;
	/*memset(ftl->cache_data, 0xAA, ftl->block_size);*/
}

/* Put a sector in the one block cache */
void sm_cache_put(struct sm_ftl *ftl, char *buffer, int boffset)
{
	memcpy(ftl->cache_data + boffset, buffer, SM_SECTOR_SIZE);
	clear_bit(boffset / SM_SECTOR_SIZE, &ftl->cache_data_invalid_bitmap);
	ftl->cache_clean = 0;
}

/* Read a sector from the cache */
int sm_cache_get(struct sm_ftl *ftl, char *buffer, int boffset)
{
	if (test_bit(boffset / SM_SECTOR_SIZE,
		&ftl->cache_data_invalid_bitmap))
		return -1;

	memcpy(buffer, ftl->cache_data + boffset, SM_SECTOR_SIZE);
	return 0;
}

/* Write the cache to hardware */
int sm_cache_flush(struct sm_ftl *ftl)
{
	struct ftl_zone *zone;

	int sector_num;
	uint16_t write_sector;
	int zone_num = ftl->cache_zone;
	int block_num;

	if (ftl->cache_clean)
		return 0;

	if (ftl->unstable)
		return -EIO;

	BUG_ON(zone_num < 0);
	zone = &ftl->zones[zone_num];
	block_num = zone->lba_to_phys_table[ftl->cache_block];


	/* Try to read all unread areas of the cache block */
	for_each_bit(sector_num, &ftl->cache_data_invalid_bitmap,
		ftl->block_size / SM_SECTOR_SIZE) {

		if (!sm_read_sector(ftl,
			zone_num, block_num, sector_num * SM_SECTOR_SIZE,
			ftl->cache_data + sector_num * SM_SECTOR_SIZE, NULL))
			clear_bit(sector_num,
				&ftl->cache_data_invalid_bitmap);
	}
restart:

	if (ftl->unstable)
		return -EIO;

	/* If there are no spare blocks, */
	/* we could still continue by erasing/writing the current block,
	   but for such worn out media it isn't worth the trouble,
	   and the dangers */
	if (kfifo_out(&zone->free_sectors,
		(unsigned char *)&write_sector, 2) != 2) {
		dbg("no free sectors for write!");
		return -EIO;
	}


	if (sm_write_block(ftl, ftl->cache_data, zone_num, write_sector,
		ftl->cache_block, ftl->cache_data_invalid_bitmap))
		goto restart;

	/* Update the FTL table */
	zone->lba_to_phys_table[ftl->cache_block] = write_sector;

	/* Write successful, so erase and free the old block */
	if (block_num > 0)
		sm_erase_block(ftl, zone_num, block_num, 1);

	sm_cache_init(ftl);
	return 0;
}
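
/*
 * Summary of the flush path above: the cache holds exactly one logical
 * block. Sectors the upper layer never wrote are first read back from the
 * old physical block (tracked by cache_data_invalid_bitmap), then a fresh
 * physical block is taken from the free FIFO, the whole block is written
 * with the LBA stored in its OOB, the FTL table is repointed, and the old
 * block is erased back into the free pool. A failed write loops back to
 * "restart" and simply consumes the next free block.
 */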


/* flush timer, runs a second after last write */
static void sm_cache_flush_timer(unsigned long data)
{
	struct sm_ftl *ftl = (struct sm_ftl *)data;
	queue_work(cache_flush_workqueue, &ftl->flush_work);
}

/* cache flush work, kicked by the timer */
static void sm_cache_flush_work(struct work_struct *work)
{
	struct sm_ftl *ftl = container_of(work, struct sm_ftl, flush_work);
	mutex_lock(&ftl->mutex);
	sm_cache_flush(ftl);
	mutex_unlock(&ftl->mutex);
	return;
}

/* ---------------- outside interface -------------------------------------- */

/* outside interface: read a sector */
static int sm_read(struct mtd_blktrans_dev *dev,
		   unsigned long sect_no, char *buf)
{
	struct sm_ftl *ftl = dev->priv;
	struct ftl_zone *zone;
	int error = 0, in_cache = 0;
	int zone_num, block, boffset;

	sm_break_offset(ftl, sect_no << 9, &zone_num, &block, &boffset);
	mutex_lock(&ftl->mutex);


	zone = sm_get_zone(ftl, zone_num);
	if (IS_ERR(zone)) {
		error = PTR_ERR(zone);
		goto unlock;
	}

	/* Have to look at the cache first */
	if (ftl->cache_zone == zone_num && ftl->cache_block == block) {
		in_cache = 1;
		if (!sm_cache_get(ftl, buf, boffset))
			goto unlock;
	}

	/* Translate the block and return if it doesn't exist in the table */
	block = zone->lba_to_phys_table[block];

	if (block == -1) {
		memset(buf, 0xFF, SM_SECTOR_SIZE);
		goto unlock;
	}

	if (sm_read_sector(ftl, zone_num, block, boffset, buf, NULL)) {
		error = -EIO;
		goto unlock;
	}

	if (in_cache)
		sm_cache_put(ftl, buf, boffset);
unlock:
	mutex_unlock(&ftl->mutex);
	return error;
}

/* outside interface: write a sector */
static int sm_write(struct mtd_blktrans_dev *dev,
		    unsigned long sec_no, char *buf)
{
	struct sm_ftl *ftl = dev->priv;
	struct ftl_zone *zone;
	int error = 0, zone_num, block, boffset;

	BUG_ON(ftl->readonly);
	sm_break_offset(ftl, sec_no << 9, &zone_num, &block, &boffset);

	/* No need for the flush thread to run now */
	del_timer(&ftl->timer);
	mutex_lock(&ftl->mutex);

	zone = sm_get_zone(ftl, zone_num);
	if (IS_ERR(zone)) {
		error = PTR_ERR(zone);
		goto unlock;
	}

	/* If the entry is not in the cache, flush it */
	if (ftl->cache_block != block || ftl->cache_zone != zone_num) {

		error = sm_cache_flush(ftl);
		if (error)
			goto unlock;

		ftl->cache_block = block;
		ftl->cache_zone = zone_num;
	}

	sm_cache_put(ftl, buf, boffset);
unlock:
	mod_timer(&ftl->timer, jiffies + msecs_to_jiffies(cache_timeout));
	mutex_unlock(&ftl->mutex);
	return error;
}
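
/*
 * Note that sm_write() never touches the media directly: writes land in
 * the one-block cache, a write to a different logical block forces a
 * flush of the previous one first, and the timer re-armed above makes
 * sure the cache is flushed roughly cache_timeout milliseconds after the
 * last write even if no further I/O arrives.
 */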

/* outside interface: flush everything */
static int sm_flush(struct mtd_blktrans_dev *dev)
{
	struct sm_ftl *ftl = dev->priv;
	int retval;

	mutex_lock(&ftl->mutex);
	retval = sm_cache_flush(ftl);
	mutex_unlock(&ftl->mutex);
	return retval;
}

/* outside interface: device is released */
static int sm_release(struct mtd_blktrans_dev *dev)
{
	struct sm_ftl *ftl = dev->priv;

	mutex_lock(&ftl->mutex);
	del_timer_sync(&ftl->timer);
	cancel_work_sync(&ftl->flush_work);
	sm_cache_flush(ftl);
	mutex_unlock(&ftl->mutex);
	return 0;
}

/* outside interface: get geometry */
static int sm_getgeo(struct mtd_blktrans_dev *dev, struct hd_geometry *geo)
{
	struct sm_ftl *ftl = dev->priv;
	geo->heads = ftl->heads;
	geo->sectors = ftl->sectors;
	geo->cylinders = ftl->cylinders;
	return 0;
}

/* external interface: main initialization function */
static void sm_add_mtd(struct mtd_blktrans_ops *tr, struct mtd_info *mtd)
{
	struct mtd_blktrans_dev *trans;
	struct sm_ftl *ftl;

	/* Allocate & initialize our private structure */
	ftl = kzalloc(sizeof(struct sm_ftl), GFP_KERNEL);
	if (!ftl)
		goto error1;


	mutex_init(&ftl->mutex);
	setup_timer(&ftl->timer, sm_cache_flush_timer, (unsigned long)ftl);
	INIT_WORK(&ftl->flush_work, sm_cache_flush_work);
	init_completion(&ftl->erase_completion);

	/* Read media information */
	if (sm_get_media_info(ftl, mtd)) {
		dbg("found unsupported mtd device, aborting");
		goto error2;
	}


	/* Allocate temporary CIS buffer for read retry support */
	ftl->cis_buffer = kzalloc(SM_SECTOR_SIZE, GFP_KERNEL);
	if (!ftl->cis_buffer)
		goto error2;

	/* Allocate zone array, it will be initialized on demand */
	ftl->zones = kzalloc(sizeof(struct ftl_zone) * ftl->zone_count,
								GFP_KERNEL);
	if (!ftl->zones)
		goto error3;

	/* Allocate the cache */
	ftl->cache_data = kzalloc(ftl->block_size, GFP_KERNEL);

	if (!ftl->cache_data)
		goto error4;

	sm_cache_init(ftl);


	/* Allocate upper layer structure and initialize it */
	trans = kzalloc(sizeof(struct mtd_blktrans_dev), GFP_KERNEL);
	if (!trans)
		goto error5;

	ftl->trans = trans;
	trans->priv = ftl;

	trans->tr = tr;
	trans->mtd = mtd;
	trans->devnum = -1;
	trans->size = (ftl->block_size * ftl->max_lba * ftl->zone_count) >> 9;
	trans->readonly = ftl->readonly;

	if (sm_find_cis(ftl)) {
		dbg("CIS not found on mtd device, aborting");
		goto error6;
	}

	ftl->disk_attributes = sm_create_sysfs_attributes(ftl);
	trans->disk_attributes = ftl->disk_attributes;

	sm_printk("Found %d MiB xD/SmartMedia FTL on mtd%d",
		(int)(mtd->size / (1024 * 1024)), mtd->index);

	dbg("FTL layout:");
	dbg("%d zone(s), each consists of %d blocks (+%d spares)",
		ftl->zone_count, ftl->max_lba,
		ftl->zone_size - ftl->max_lba);
	dbg("each block consists of %d bytes",
		ftl->block_size);


	/* Register device */
	if (add_mtd_blktrans_dev(trans)) {
		dbg("error in mtdblktrans layer");
		goto error6;
	}
	return;
error6:
	kfree(trans);
error5:
	kfree(ftl->cache_data);
error4:
	kfree(ftl->zones);
error3:
	kfree(ftl->cis_buffer);
error2:
	kfree(ftl);
error1:
	return;
}

/* main interface: device {surprise,} removal */
static void sm_remove_dev(struct mtd_blktrans_dev *dev)
{
	struct sm_ftl *ftl = dev->priv;
	int i;

	del_mtd_blktrans_dev(dev);
	ftl->trans = NULL;

	for (i = 0 ; i < ftl->zone_count; i++) {

		if (!ftl->zones[i].initialized)
			continue;

		kfree(ftl->zones[i].lba_to_phys_table);
		kfifo_free(&ftl->zones[i].free_sectors);
	}

	sm_delete_sysfs_attributes(ftl);
	kfree(ftl->cis_buffer);
	kfree(ftl->zones);
	kfree(ftl->cache_data);
	kfree(ftl);
}

static struct mtd_blktrans_ops sm_ftl_ops = {
	.name		= "smblk",
	.major		= -1,
	.part_bits	= SM_FTL_PARTN_BITS,
	.blksize	= SM_SECTOR_SIZE,
	.getgeo		= sm_getgeo,

	.add_mtd	= sm_add_mtd,
	.remove_dev	= sm_remove_dev,

	.readsect	= sm_read,
	.writesect	= sm_write,

	.flush		= sm_flush,
	.release	= sm_release,

	.owner		= THIS_MODULE,
};

static __init int sm_module_init(void)
{
	int error = 0;

	/* create_freezeable_workqueue() returns NULL on failure,
	   not an ERR_PTR value */
	cache_flush_workqueue = create_freezeable_workqueue("smflush");
	if (!cache_flush_workqueue)
		return -ENOMEM;

	error = register_mtd_blktrans(&sm_ftl_ops);
	if (error)
		destroy_workqueue(cache_flush_workqueue);
	return error;
}

static void __exit sm_module_exit(void)
{
	destroy_workqueue(cache_flush_workqueue);
	deregister_mtd_blktrans(&sm_ftl_ops);
}

module_init(sm_module_init);
module_exit(sm_module_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Maxim Levitsky <maximlevitsky@gmail.com>");
MODULE_DESCRIPTION("SmartMedia/xD mtd translation layer");