/*
 * $Id: mtdchar.c,v 1.74 2005/08/04 01:05:48 tpoynor Exp $
 *
 * Character-device access to raw MTD devices.
 *
 */

#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/compatmac.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/fs.h>
#include <asm/uaccess.h>

#include <linux/device.h>

static struct class *mtd_class;

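/*
 * Each registered MTD device is exposed through two character device
 * nodes on MTD_CHAR_MAJOR: "mtd%d" on the even minor (read-write) and
 * "mtd%dro" on the odd minor (read-only).  The notifier below creates
 * and destroys the corresponding class devices as MTD devices come
 * and go.
 */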
static void mtd_notify_add(struct mtd_info* mtd)
{
	if (!mtd)
		return;

	class_device_create(mtd_class, MKDEV(MTD_CHAR_MAJOR, mtd->index*2),
			    NULL, "mtd%d", mtd->index);

	class_device_create(mtd_class,
			    MKDEV(MTD_CHAR_MAJOR, mtd->index*2+1),
			    NULL, "mtd%dro", mtd->index);
}

static void mtd_notify_remove(struct mtd_info* mtd)
{
	if (!mtd)
		return;

	class_device_destroy(mtd_class, MKDEV(MTD_CHAR_MAJOR, mtd->index*2));
	class_device_destroy(mtd_class, MKDEV(MTD_CHAR_MAJOR, mtd->index*2+1));
}

static struct mtd_notifier notifier = {
	.add	= mtd_notify_add,
	.remove	= mtd_notify_remove,
};

/*
 * We use file->private_data to store a pointer to the MTD device.
 * Since alignment is at least 32 bits, we have 2 bits free for OTP
 * modes as well.
 */

#define TO_MTD(file) (struct mtd_info *)((long)((file)->private_data) & ~3L)

#define MTD_MODE_OTP_FACT	1
#define MTD_MODE_OTP_USER	2
#define MTD_MODE(file)		((long)((file)->private_data) & 3)

#define SET_MTD_MODE(file, mode) \
	do { long __p = (long)((file)->private_data); \
	     (file)->private_data = (void *)((__p & ~3L) | mode); } while (0)

static loff_t mtd_lseek (struct file *file, loff_t offset, int orig)
{
	struct mtd_info *mtd = TO_MTD(file);

	switch (orig) {
	case 0:
		/* SEEK_SET */
		break;
	case 1:
		/* SEEK_CUR */
		offset += file->f_pos;
		break;
	case 2:
		/* SEEK_END */
		offset += mtd->size;
		break;
	default:
		return -EINVAL;
	}

	if (offset >= 0 && offset < mtd->size)
		return file->f_pos = offset;

	return -EINVAL;
}


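/*
 * Minor numbers encode both the device and the access mode: the MTD
 * device number is minor >> 1, and an odd minor is the read-only
 * "mtd%dro" node, so any open for writing on an odd minor is refused.
 */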
static int mtd_open(struct inode *inode, struct file *file)
{
	int minor = iminor(inode);
	int devnum = minor >> 1;
	struct mtd_info *mtd;

	DEBUG(MTD_DEBUG_LEVEL0, "MTD_open\n");

	if (devnum >= MAX_MTD_DEVICES)
		return -ENODEV;

	/* You can't open the RO devices RW */
	if ((file->f_mode & 2) && (minor & 1))
		return -EACCES;

	mtd = get_mtd_device(NULL, devnum);

	if (!mtd)
		return -ENODEV;

	if (MTD_ABSENT == mtd->type) {
		put_mtd_device(mtd);
		return -ENODEV;
	}

	file->private_data = mtd;

	/* You can't open it RW if it's not a writeable device */
	if ((file->f_mode & 2) && !(mtd->flags & MTD_WRITEABLE)) {
		put_mtd_device(mtd);
		return -EACCES;
	}

	return 0;
} /* mtd_open */

/*====================================================================*/

static int mtd_close(struct inode *inode, struct file *file)
{
	struct mtd_info *mtd;

	DEBUG(MTD_DEBUG_LEVEL0, "MTD_close\n");

	mtd = TO_MTD(file);

	if (mtd->sync)
		mtd->sync(mtd);

	put_mtd_device(mtd);

	return 0;
} /* mtd_close */

/* FIXME: This _really_ needs to die. In 2.5, we should lock the
   userspace buffer down and use it directly with readv/writev.
*/
#define MAX_KMALLOC_SIZE 0x20000

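/*
 * read()/write() shuttle data through a kernel bounce buffer of at
 * most MAX_KMALLOC_SIZE bytes per iteration.  If an OTP mode has been
 * selected via the OTPSELECT ioctl, the transfer is redirected to the
 * factory or user protection registers instead of the main array.
 */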
static ssize_t mtd_read(struct file *file, char __user *buf, size_t count,loff_t *ppos)
{
	struct mtd_info *mtd = TO_MTD(file);
	size_t retlen=0;
	size_t total_retlen=0;
	int ret=0;
	int len;
	char *kbuf;

	DEBUG(MTD_DEBUG_LEVEL0,"MTD_read\n");

	if (*ppos + count > mtd->size)
		count = mtd->size - *ppos;

	if (!count)
		return 0;

	/* FIXME: Use kiovec in 2.5 to lock down the user's buffers
	   and pass them directly to the MTD functions */
	while (count) {
		if (count > MAX_KMALLOC_SIZE)
			len = MAX_KMALLOC_SIZE;
		else
			len = count;

		kbuf=kmalloc(len,GFP_KERNEL);
		if (!kbuf)
			return -ENOMEM;

		switch (MTD_MODE(file)) {
		case MTD_MODE_OTP_FACT:
			ret = mtd->read_fact_prot_reg(mtd, *ppos, len, &retlen, kbuf);
			break;
		case MTD_MODE_OTP_USER:
			ret = mtd->read_user_prot_reg(mtd, *ppos, len, &retlen, kbuf);
			break;
		default:
			ret = MTD_READ(mtd, *ppos, len, &retlen, kbuf);
		}
		/* Nand returns -EBADMSG on ecc errors, but it returns
		 * the data. For our userspace tools it is important
		 * to dump areas with ecc errors !
		 * Userspace software which accesses NAND this way
		 * must be aware of the fact that it deals with NAND
		 */
		if (!ret || (ret == -EBADMSG)) {
			*ppos += retlen;
			if (copy_to_user(buf, kbuf, retlen)) {
				kfree(kbuf);
				return -EFAULT;
			}
			else
				total_retlen += retlen;

			count -= retlen;
			buf += retlen;
			if (retlen == 0)
				count = 0;
		}
		else {
			kfree(kbuf);
			return ret;
		}

		kfree(kbuf);
	}

	return total_retlen;
} /* mtd_read */

static ssize_t mtd_write(struct file *file, const char __user *buf, size_t count,loff_t *ppos)
{
	struct mtd_info *mtd = TO_MTD(file);
	char *kbuf;
	size_t retlen;
	size_t total_retlen=0;
	int ret=0;
	int len;

	DEBUG(MTD_DEBUG_LEVEL0,"MTD_write\n");

	if (*ppos == mtd->size)
		return -ENOSPC;

	if (*ppos + count > mtd->size)
		count = mtd->size - *ppos;

	if (!count)
		return 0;

	while (count) {
		if (count > MAX_KMALLOC_SIZE)
			len = MAX_KMALLOC_SIZE;
		else
			len = count;

		kbuf=kmalloc(len,GFP_KERNEL);
		if (!kbuf) {
			printk("kmalloc is null\n");
			return -ENOMEM;
		}

		if (copy_from_user(kbuf, buf, len)) {
			kfree(kbuf);
			return -EFAULT;
		}

		switch (MTD_MODE(file)) {
		case MTD_MODE_OTP_FACT:
			ret = -EROFS;
			break;
		case MTD_MODE_OTP_USER:
			if (!mtd->write_user_prot_reg) {
				ret = -EOPNOTSUPP;
				break;
			}
			ret = mtd->write_user_prot_reg(mtd, *ppos, len, &retlen, kbuf);
			break;
		default:
			ret = (*(mtd->write))(mtd, *ppos, len, &retlen, kbuf);
		}
		if (!ret) {
			*ppos += retlen;
			total_retlen += retlen;
			count -= retlen;
			buf += retlen;
		}
		else {
			kfree(kbuf);
			return ret;
		}

		kfree(kbuf);
	}

	return total_retlen;
} /* mtd_write */

/*======================================================================

    IOCTL calls for getting device parameters.

======================================================================*/
static void mtdchar_erase_callback (struct erase_info *instr)
{
	wake_up((wait_queue_head_t *)instr->priv);
}

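/*
 * The argument size and transfer direction are encoded in the ioctl
 * command number itself; the user pointer is pre-checked with
 * access_ok() before the individual cases copy data in or out.
 */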
static int mtd_ioctl(struct inode *inode, struct file *file,
		     u_int cmd, u_long arg)
{
	struct mtd_info *mtd = TO_MTD(file);
	void __user *argp = (void __user *)arg;
	int ret = 0;
	u_long size;

	DEBUG(MTD_DEBUG_LEVEL0, "MTD_ioctl\n");

	size = (cmd & IOCSIZE_MASK) >> IOCSIZE_SHIFT;
	if (cmd & IOC_IN) {
		if (!access_ok(VERIFY_READ, argp, size))
			return -EFAULT;
	}
	if (cmd & IOC_OUT) {
		if (!access_ok(VERIFY_WRITE, argp, size))
			return -EFAULT;
	}

	switch (cmd) {
	case MEMGETREGIONCOUNT:
		if (copy_to_user(argp, &(mtd->numeraseregions), sizeof(int)))
			return -EFAULT;
		break;

	case MEMGETREGIONINFO:
	{
		struct region_info_user ur;

		if (copy_from_user(&ur, argp, sizeof(struct region_info_user)))
			return -EFAULT;

		if (ur.regionindex >= mtd->numeraseregions)
			return -EINVAL;
		if (copy_to_user(argp, &(mtd->eraseregions[ur.regionindex]),
				sizeof(struct mtd_erase_region_info)))
			return -EFAULT;
		break;
	}

	case MEMGETINFO:
		if (copy_to_user(argp, mtd, sizeof(struct mtd_info_user)))
			return -EFAULT;
		break;

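	/*
	 * MEMERASE is synchronous: the erase_info callback points at
	 * mtdchar_erase_callback() with a wait queue head on this stack
	 * frame in ->priv, and we sleep uninterruptibly until the driver
	 * marks the erase MTD_ERASE_DONE or MTD_ERASE_FAILED.
	 */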
	case MEMERASE:
	{
		struct erase_info *erase;

		if(!(file->f_mode & 2))
			return -EPERM;

		erase=kmalloc(sizeof(struct erase_info),GFP_KERNEL);
		if (!erase)
			ret = -ENOMEM;
		else {
			wait_queue_head_t waitq;
			DECLARE_WAITQUEUE(wait, current);

			init_waitqueue_head(&waitq);

			memset (erase,0,sizeof(struct erase_info));
			if (copy_from_user(&erase->addr, argp,
				    sizeof(struct erase_info_user))) {
				kfree(erase);
				return -EFAULT;
			}
			erase->mtd = mtd;
			erase->callback = mtdchar_erase_callback;
			erase->priv = (unsigned long)&waitq;

			/*
			  FIXME: Allow INTERRUPTIBLE. Which means
			  not having the wait_queue head on the stack.

			  If the wq_head is on the stack, and we
			  leave because we got interrupted, then the
			  wq_head is no longer there when the
			  callback routine tries to wake us up.
			*/
			ret = mtd->erase(mtd, erase);
			if (!ret) {
				set_current_state(TASK_UNINTERRUPTIBLE);
				add_wait_queue(&waitq, &wait);
				if (erase->state != MTD_ERASE_DONE &&
				    erase->state != MTD_ERASE_FAILED)
					schedule();
				remove_wait_queue(&waitq, &wait);
				set_current_state(TASK_RUNNING);

				ret = (erase->state == MTD_ERASE_FAILED)?-EIO:0;
			}
			kfree(erase);
		}
		break;
	}

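	/*
	 * MEMWRITEOOB/MEMREADOOB transfer the out-of-band (spare) area
	 * of a NAND page through a kernel bounce buffer; the requested
	 * length is sanity-checked before the buffer is allocated.
	 */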
	case MEMWRITEOOB:
	{
		struct mtd_oob_buf buf;
		void *databuf;
		ssize_t retlen;

		if(!(file->f_mode & 2))
			return -EPERM;

		if (copy_from_user(&buf, argp, sizeof(struct mtd_oob_buf)))
			return -EFAULT;

		if (buf.length > 0x4096)
			return -EINVAL;

		if (!mtd->write_oob)
			ret = -EOPNOTSUPP;
		else
			ret = access_ok(VERIFY_READ, buf.ptr,
					buf.length) ? 0 : -EFAULT;

		if (ret)
			return ret;

		databuf = kmalloc(buf.length, GFP_KERNEL);
		if (!databuf)
			return -ENOMEM;

		if (copy_from_user(databuf, buf.ptr, buf.length)) {
			kfree(databuf);
			return -EFAULT;
		}

		ret = (mtd->write_oob)(mtd, buf.start, buf.length, &retlen, databuf);

		if (copy_to_user(argp + sizeof(uint32_t), &retlen, sizeof(uint32_t)))
			ret = -EFAULT;

		kfree(databuf);
		break;

	}

	case MEMREADOOB:
	{
		struct mtd_oob_buf buf;
		void *databuf;
		ssize_t retlen;

		if (copy_from_user(&buf, argp, sizeof(struct mtd_oob_buf)))
			return -EFAULT;

		if (buf.length > 0x4096)
			return -EINVAL;

		if (!mtd->read_oob)
			ret = -EOPNOTSUPP;
		else
			ret = access_ok(VERIFY_WRITE, buf.ptr,
					buf.length) ? 0 : -EFAULT;

		if (ret)
			return ret;

		databuf = kmalloc(buf.length, GFP_KERNEL);
		if (!databuf)
			return -ENOMEM;

		ret = (mtd->read_oob)(mtd, buf.start, buf.length, &retlen, databuf);

		if (put_user(retlen, (uint32_t __user *)argp))
			ret = -EFAULT;
		else if (retlen && copy_to_user(buf.ptr, databuf, retlen))
			ret = -EFAULT;

		kfree(databuf);
		break;
	}

	case MEMLOCK:
	{
		struct erase_info_user info;

		if (copy_from_user(&info, argp, sizeof(info)))
			return -EFAULT;

		if (!mtd->lock)
			ret = -EOPNOTSUPP;
		else
			ret = mtd->lock(mtd, info.start, info.length);
		break;
	}

	case MEMUNLOCK:
	{
		struct erase_info_user info;

		if (copy_from_user(&info, argp, sizeof(info)))
			return -EFAULT;

		if (!mtd->unlock)
			ret = -EOPNOTSUPP;
		else
			ret = mtd->unlock(mtd, info.start, info.length);
		break;
	}

	case MEMSETOOBSEL:
	{
		if (copy_from_user(&mtd->oobinfo, argp, sizeof(struct nand_oobinfo)))
			return -EFAULT;
		break;
	}

	case MEMGETOOBSEL:
	{
		if (copy_to_user(argp, &(mtd->oobinfo), sizeof(struct nand_oobinfo)))
			return -EFAULT;
		break;
	}

	case MEMGETBADBLOCK:
	{
		loff_t offs;

		if (copy_from_user(&offs, argp, sizeof(loff_t)))
			return -EFAULT;
		if (!mtd->block_isbad)
			ret = -EOPNOTSUPP;
		else
			return mtd->block_isbad(mtd, offs);
		break;
	}

	case MEMSETBADBLOCK:
	{
		loff_t offs;

		if (copy_from_user(&offs, argp, sizeof(loff_t)))
			return -EFAULT;
		if (!mtd->block_markbad)
			ret = -EOPNOTSUPP;
		else
			return mtd->block_markbad(mtd, offs);
		break;
	}

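/*
 * The OTP ioctls record the selected protection-register mode in the
 * two spare low bits of file->private_data (see SET_MTD_MODE above),
 * so later read()/write() calls on the same file descriptor operate
 * on the OTP area rather than the main flash array.
 */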
#ifdef CONFIG_MTD_OTP
	case OTPSELECT:
	{
		int mode;
		if (copy_from_user(&mode, argp, sizeof(int)))
			return -EFAULT;
		SET_MTD_MODE(file, 0);
		switch (mode) {
		case MTD_OTP_FACTORY:
			if (!mtd->read_fact_prot_reg)
				ret = -EOPNOTSUPP;
			else
				SET_MTD_MODE(file, MTD_MODE_OTP_FACT);
			break;
		case MTD_OTP_USER:
			if (!mtd->read_user_prot_reg)
				ret = -EOPNOTSUPP;
			else
				SET_MTD_MODE(file, MTD_MODE_OTP_USER);
			break;
		default:
			ret = -EINVAL;
		case MTD_OTP_OFF:
			break;
		}
		file->f_pos = 0;
		break;
	}

	case OTPGETREGIONCOUNT:
	case OTPGETREGIONINFO:
	{
		struct otp_info *buf = kmalloc(4096, GFP_KERNEL);
		if (!buf)
			return -ENOMEM;
		ret = -EOPNOTSUPP;
		switch (MTD_MODE(file)) {
		case MTD_MODE_OTP_FACT:
			if (mtd->get_fact_prot_info)
				ret = mtd->get_fact_prot_info(mtd, buf, 4096);
			break;
		case MTD_MODE_OTP_USER:
			if (mtd->get_user_prot_info)
				ret = mtd->get_user_prot_info(mtd, buf, 4096);
			break;
		}
		if (ret >= 0) {
			if (cmd == OTPGETREGIONCOUNT) {
				int nbr = ret / sizeof(struct otp_info);
				ret = copy_to_user(argp, &nbr, sizeof(int));
			} else
				ret = copy_to_user(argp, buf, ret);
			if (ret)
				ret = -EFAULT;
		}
		kfree(buf);
		break;
	}

	case OTPLOCK:
	{
		struct otp_info info;

		if (MTD_MODE(file) != MTD_MODE_OTP_USER)
			return -EINVAL;
		if (copy_from_user(&info, argp, sizeof(info)))
			return -EFAULT;
		if (!mtd->lock_user_prot_reg)
			return -EOPNOTSUPP;
		ret = mtd->lock_user_prot_reg(mtd, info.start, info.length);
		break;
	}
#endif

	default:
		ret = -ENOTTY;
	}

	return ret;
} /* memory_ioctl */

static struct file_operations mtd_fops = {
	.owner		= THIS_MODULE,
	.llseek		= mtd_lseek,
	.read		= mtd_read,
	.write		= mtd_write,
	.ioctl		= mtd_ioctl,
	.open		= mtd_open,
	.release	= mtd_close,
};

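/*
 * Module init: claim the static MTD_CHAR_MAJOR first, then create the
 * "mtd" class, and finally register with the MTD core so the notifier
 * adds class devices for every MTD device already present and for any
 * registered later.
 */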
static int __init init_mtdchar(void)
{
	if (register_chrdev(MTD_CHAR_MAJOR, "mtd", &mtd_fops)) {
		printk(KERN_NOTICE "Can't allocate major number %d for Memory Technology Devices.\n",
		       MTD_CHAR_MAJOR);
		return -EAGAIN;
	}

	mtd_class = class_create(THIS_MODULE, "mtd");

	if (IS_ERR(mtd_class)) {
		printk(KERN_ERR "Error creating mtd class.\n");
		unregister_chrdev(MTD_CHAR_MAJOR, "mtd");
		return PTR_ERR(mtd_class);
	}

	register_mtd_user(&notifier);
	return 0;
}

static void __exit cleanup_mtdchar(void)
{
	unregister_mtd_user(&notifier);
	class_destroy(mtd_class);
	unregister_chrdev(MTD_CHAR_MAJOR, "mtd");
}

module_init(init_mtdchar);
module_exit(cleanup_mtdchar);


MODULE_LICENSE("GPL");
MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org>");
MODULE_DESCRIPTION("Direct character-device access to MTD devices");