/*
 * $Id: mtdchar.c,v 1.67 2005/02/08 17:45:51 nico Exp $
 *
 * Character-device access to raw MTD devices.
 *
 */

#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/compatmac.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/fs.h>
#include <asm/uaccess.h>

#ifdef CONFIG_DEVFS_FS
#include <linux/devfs_fs_kernel.h>

static void mtd_notify_add(struct mtd_info *mtd)
{
	if (!mtd)
		return;

	devfs_mk_cdev(MKDEV(MTD_CHAR_MAJOR, mtd->index*2),
		      S_IFCHR | S_IRUGO | S_IWUGO, "mtd/%d", mtd->index);

	devfs_mk_cdev(MKDEV(MTD_CHAR_MAJOR, mtd->index*2+1),
		      S_IFCHR | S_IRUGO, "mtd/%dro", mtd->index);
}

static void mtd_notify_remove(struct mtd_info *mtd)
{
	if (!mtd)
		return;
	devfs_remove("mtd/%d", mtd->index);
	devfs_remove("mtd/%dro", mtd->index);
}

static struct mtd_notifier notifier = {
	.add	= mtd_notify_add,
	.remove	= mtd_notify_remove,
};

static inline void mtdchar_devfs_init(void)
{
	devfs_mk_dir("mtd");
	register_mtd_user(&notifier);
}

static inline void mtdchar_devfs_exit(void)
{
	unregister_mtd_user(&notifier);
	devfs_remove("mtd");
}
#else /* !DEVFS */
#define mtdchar_devfs_init() do { } while(0)
#define mtdchar_devfs_exit() do { } while(0)
#endif


/* Well... let's abuse the unused bits in file->f_mode for those */
#define MTD_MODE_OTP_FACT	0x1000
#define MTD_MODE_OTP_USER	0x2000
#define MTD_MODE_MASK		0xf000
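/*
 * These mode bits are set by the OTPSELECT ioctl below and checked in
 * mtd_read(), mtd_write() and the OTP ioctls to decide whether an access
 * goes to the main flash array or to one of the protection register areas.
 */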

static loff_t mtd_lseek (struct file *file, loff_t offset, int orig)
{
	struct mtd_info *mtd = file->private_data;

	switch (orig) {
	case 0:
		/* SEEK_SET */
		file->f_pos = offset;
		break;
	case 1:
		/* SEEK_CUR */
		file->f_pos += offset;
		break;
	case 2:
		/* SEEK_END */
		file->f_pos = mtd->size + offset;
		break;
	default:
		return -EINVAL;
	}

	if (file->f_pos < 0)
		file->f_pos = 0;
	else if (file->f_pos >= mtd->size)
		file->f_pos = mtd->size - 1;

	return file->f_pos;
}


static int mtd_open(struct inode *inode, struct file *file)
{
	int minor = iminor(inode);
	int devnum = minor >> 1;
	struct mtd_info *mtd;

	DEBUG(MTD_DEBUG_LEVEL0, "MTD_open\n");

	if (devnum >= MAX_MTD_DEVICES)
		return -ENODEV;

	/* You can't open the RO devices RW */
	if ((file->f_mode & 2) && (minor & 1))
		return -EACCES;

	/* make sure the locally abused bits are initially clear */
	if (file->f_mode & MTD_MODE_MASK)
		return -EWOULDBLOCK;

	mtd = get_mtd_device(NULL, devnum);

	if (!mtd)
		return -ENODEV;

	if (MTD_ABSENT == mtd->type) {
		put_mtd_device(mtd);
		return -ENODEV;
	}

	file->private_data = mtd;

	/* You can't open it RW if it's not a writeable device */
	if ((file->f_mode & 2) && !(mtd->flags & MTD_WRITEABLE)) {
		put_mtd_device(mtd);
		return -EACCES;
	}

	return 0;
} /* mtd_open */

/*====================================================================*/

static int mtd_close(struct inode *inode, struct file *file)
{
	struct mtd_info *mtd;

	DEBUG(MTD_DEBUG_LEVEL0, "MTD_close\n");

	mtd = file->private_data;

	if (mtd->sync)
		mtd->sync(mtd);

	put_mtd_device(mtd);

	return 0;
} /* mtd_close */

/* FIXME: This _really_ needs to die. In 2.5, we should lock the
   userspace buffer down and use it directly with readv/writev.
*/
#define MAX_KMALLOC_SIZE 0x20000

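/*
 * Until then, reads and writes are staged through a kernel bounce buffer
 * of at most MAX_KMALLOC_SIZE bytes per iteration, so a large user request
 * never needs a single huge kmalloc().
 */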
static ssize_t mtd_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
{
	struct mtd_info *mtd = file->private_data;
	size_t retlen = 0;
	size_t total_retlen = 0;
	int ret = 0;
	int len;
	char *kbuf;

	DEBUG(MTD_DEBUG_LEVEL0, "MTD_read\n");

	if (*ppos + count > mtd->size)
		count = mtd->size - *ppos;

	if (!count)
		return 0;

	/* FIXME: Use kiovec in 2.5 to lock down the user's buffers
	   and pass them directly to the MTD functions */
	while (count) {
		if (count > MAX_KMALLOC_SIZE)
			len = MAX_KMALLOC_SIZE;
		else
			len = count;

		kbuf = kmalloc(len, GFP_KERNEL);
		if (!kbuf)
			return -ENOMEM;

		switch (file->f_mode & MTD_MODE_MASK) {
		case MTD_MODE_OTP_FACT:
			ret = mtd->read_fact_prot_reg(mtd, *ppos, len, &retlen, kbuf);
			break;
		case MTD_MODE_OTP_USER:
			ret = mtd->read_user_prot_reg(mtd, *ppos, len, &retlen, kbuf);
			break;
		default:
			ret = MTD_READ(mtd, *ppos, len, &retlen, kbuf);
		}
		/* NAND returns -EBADMSG on ECC errors, but it still returns
		 * the data. For our userspace tools it is important to dump
		 * areas with ECC errors!
		 * Userspace software which accesses NAND this way must be
		 * aware of the fact that it deals with NAND.
		 */
		if (!ret || (ret == -EBADMSG)) {
			*ppos += retlen;
			if (copy_to_user(buf, kbuf, retlen)) {
				kfree(kbuf);
				return -EFAULT;
			}
			else
				total_retlen += retlen;

			count -= retlen;
			buf += retlen;
			if (retlen == 0)
				count = 0;
		}
		else {
			kfree(kbuf);
			return ret;
		}

		kfree(kbuf);
	}

	return total_retlen;
} /* mtd_read */

static ssize_t mtd_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos)
{
	struct mtd_info *mtd = file->private_data;
	char *kbuf;
	size_t retlen;
	size_t total_retlen = 0;
	int ret = 0;
	int len;

	DEBUG(MTD_DEBUG_LEVEL0, "MTD_write\n");

	if (*ppos == mtd->size)
		return -ENOSPC;

	if (*ppos + count > mtd->size)
		count = mtd->size - *ppos;

	if (!count)
		return 0;

	while (count) {
		if (count > MAX_KMALLOC_SIZE)
			len = MAX_KMALLOC_SIZE;
		else
			len = count;

		kbuf = kmalloc(len, GFP_KERNEL);
		if (!kbuf) {
			printk("kmalloc is null\n");
			return -ENOMEM;
		}

		if (copy_from_user(kbuf, buf, len)) {
			kfree(kbuf);
			return -EFAULT;
		}

		switch (file->f_mode & MTD_MODE_MASK) {
		case MTD_MODE_OTP_FACT:
			ret = -EROFS;
			break;
		case MTD_MODE_OTP_USER:
			if (!mtd->write_user_prot_reg) {
				ret = -EOPNOTSUPP;
				break;
			}
			ret = mtd->write_user_prot_reg(mtd, *ppos, len, &retlen, kbuf);
			break;
		default:
			ret = (*(mtd->write))(mtd, *ppos, len, &retlen, kbuf);
		}
		if (!ret) {
			*ppos += retlen;
			total_retlen += retlen;
			count -= retlen;
			buf += retlen;
		}
		else {
			kfree(kbuf);
			return ret;
		}

		kfree(kbuf);
	}

	return total_retlen;
} /* mtd_write */

/*======================================================================

    IOCTL calls for getting device parameters.

======================================================================*/
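/*
 * Erase completion callback: installed as erase->callback by MEMERASE
 * below. The MTD driver invokes it when the erase finishes, waking the
 * ioctl that is sleeping on the wait queue stashed in erase->priv.
 */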
static void mtdchar_erase_callback (struct erase_info *instr)
{
	wake_up((wait_queue_head_t *)instr->priv);
}

static int mtd_ioctl(struct inode *inode, struct file *file,
		     u_int cmd, u_long arg)
{
	struct mtd_info *mtd = file->private_data;
	void __user *argp = (void __user *)arg;
	int ret = 0;
	u_long size;

	DEBUG(MTD_DEBUG_LEVEL0, "MTD_ioctl\n");

	size = (cmd & IOCSIZE_MASK) >> IOCSIZE_SHIFT;
	if (cmd & IOC_IN) {
		if (!access_ok(VERIFY_READ, argp, size))
			return -EFAULT;
	}
	if (cmd & IOC_OUT) {
		if (!access_ok(VERIFY_WRITE, argp, size))
			return -EFAULT;
	}

	switch (cmd) {
	case MEMGETREGIONCOUNT:
		if (copy_to_user(argp, &(mtd->numeraseregions), sizeof(int)))
			return -EFAULT;
		break;

	case MEMGETREGIONINFO:
	{
		struct region_info_user ur;

		if (copy_from_user(&ur, argp, sizeof(struct region_info_user)))
			return -EFAULT;

		if (ur.regionindex >= mtd->numeraseregions)
			return -EINVAL;
		if (copy_to_user(argp, &(mtd->eraseregions[ur.regionindex]),
				sizeof(struct mtd_erase_region_info)))
			return -EFAULT;
		break;
	}

	case MEMGETINFO:
		if (copy_to_user(argp, mtd, sizeof(struct mtd_info_user)))
			return -EFAULT;
		break;

	case MEMERASE:
	{
		struct erase_info *erase;

		if (!(file->f_mode & 2))
			return -EPERM;

		erase = kmalloc(sizeof(struct erase_info), GFP_KERNEL);
		if (!erase)
			ret = -ENOMEM;
		else {
			wait_queue_head_t waitq;
			DECLARE_WAITQUEUE(wait, current);

			init_waitqueue_head(&waitq);

			memset(erase, 0, sizeof(struct erase_info));
			if (copy_from_user(&erase->addr, argp,
					   sizeof(struct erase_info_user))) {
				kfree(erase);
				return -EFAULT;
			}
			erase->mtd = mtd;
			erase->callback = mtdchar_erase_callback;
			erase->priv = (unsigned long)&waitq;

			/*
			  FIXME: Allow INTERRUPTIBLE. Which means
			  not having the wait_queue head on the stack.

			  If the wq_head is on the stack, and we
			  leave because we got interrupted, then the
			  wq_head is no longer there when the
			  callback routine tries to wake us up.
			*/
			ret = mtd->erase(mtd, erase);
			if (!ret) {
				set_current_state(TASK_UNINTERRUPTIBLE);
				add_wait_queue(&waitq, &wait);
				if (erase->state != MTD_ERASE_DONE &&
				    erase->state != MTD_ERASE_FAILED)
					schedule();
				remove_wait_queue(&waitq, &wait);
				set_current_state(TASK_RUNNING);

				ret = (erase->state == MTD_ERASE_FAILED) ? -EIO : 0;
			}
			kfree(erase);
		}
		break;
	}

	case MEMWRITEOOB:
	{
		struct mtd_oob_buf buf;
		void *databuf;
		ssize_t retlen;

		if (!(file->f_mode & 2))
			return -EPERM;

		if (copy_from_user(&buf, argp, sizeof(struct mtd_oob_buf)))
			return -EFAULT;

		if (buf.length > 0x4096)
			return -EINVAL;

		if (!mtd->write_oob)
			ret = -EOPNOTSUPP;
		else
			ret = access_ok(VERIFY_READ, buf.ptr,
					buf.length) ? 0 : -EFAULT;

		if (ret)
			return ret;

		databuf = kmalloc(buf.length, GFP_KERNEL);
		if (!databuf)
			return -ENOMEM;

		if (copy_from_user(databuf, buf.ptr, buf.length)) {
			kfree(databuf);
			return -EFAULT;
		}

		ret = (mtd->write_oob)(mtd, buf.start, buf.length, &retlen, databuf);

		if (copy_to_user(argp + sizeof(uint32_t), &retlen, sizeof(uint32_t)))
			ret = -EFAULT;

		kfree(databuf);
		break;
	}

	case MEMREADOOB:
	{
		struct mtd_oob_buf buf;
		void *databuf;
		ssize_t retlen;

		if (copy_from_user(&buf, argp, sizeof(struct mtd_oob_buf)))
			return -EFAULT;

		if (buf.length > 0x4096)
			return -EINVAL;

		if (!mtd->read_oob)
			ret = -EOPNOTSUPP;
		else
			ret = access_ok(VERIFY_WRITE, buf.ptr,
					buf.length) ? 0 : -EFAULT;

		if (ret)
			return ret;

		databuf = kmalloc(buf.length, GFP_KERNEL);
		if (!databuf)
			return -ENOMEM;

		ret = (mtd->read_oob)(mtd, buf.start, buf.length, &retlen, databuf);

		if (put_user(retlen, (uint32_t __user *)argp))
			ret = -EFAULT;
		else if (retlen && copy_to_user(buf.ptr, databuf, retlen))
			ret = -EFAULT;

		kfree(databuf);
		break;
	}

	case MEMLOCK:
	{
		struct erase_info_user info;

		if (copy_from_user(&info, argp, sizeof(info)))
			return -EFAULT;

		if (!mtd->lock)
			ret = -EOPNOTSUPP;
		else
			ret = mtd->lock(mtd, info.start, info.length);
		break;
	}

	case MEMUNLOCK:
	{
		struct erase_info_user info;

		if (copy_from_user(&info, argp, sizeof(info)))
			return -EFAULT;

		if (!mtd->unlock)
			ret = -EOPNOTSUPP;
		else
			ret = mtd->unlock(mtd, info.start, info.length);
		break;
	}

	case MEMSETOOBSEL:
	{
		if (copy_from_user(&mtd->oobinfo, argp, sizeof(struct nand_oobinfo)))
			return -EFAULT;
		break;
	}

	case MEMGETOOBSEL:
	{
		if (copy_to_user(argp, &(mtd->oobinfo), sizeof(struct nand_oobinfo)))
			return -EFAULT;
		break;
	}

	case MEMGETBADBLOCK:
	{
		loff_t offs;

		if (copy_from_user(&offs, argp, sizeof(loff_t)))
			return -EFAULT;
		if (!mtd->block_isbad)
			ret = -EOPNOTSUPP;
		else
			return mtd->block_isbad(mtd, offs);
		break;
	}

	case MEMSETBADBLOCK:
	{
		loff_t offs;

		if (copy_from_user(&offs, argp, sizeof(loff_t)))
			return -EFAULT;
		if (!mtd->block_markbad)
			ret = -EOPNOTSUPP;
		else
			return mtd->block_markbad(mtd, offs);
		break;
	}

#ifdef CONFIG_MTD_OTP
	case OTPSELECT:
	{
		int mode;
		if (copy_from_user(&mode, argp, sizeof(int)))
			return -EFAULT;
		file->f_mode &= ~MTD_MODE_MASK;
		switch (mode) {
		case MTD_OTP_FACTORY:
			if (!mtd->read_fact_prot_reg)
				ret = -EOPNOTSUPP;
			else
				file->f_mode |= MTD_MODE_OTP_FACT;
			break;
		case MTD_OTP_USER:
			if (!mtd->read_user_prot_reg)
				ret = -EOPNOTSUPP;
			else
				file->f_mode |= MTD_MODE_OTP_USER;
			break;
		default:
			ret = -EINVAL;
		case MTD_OTP_OFF:
			break;
		}
		break;
	}

	case OTPGETREGIONCOUNT:
	case OTPGETREGIONINFO:
	{
		struct otp_info *buf = kmalloc(4096, GFP_KERNEL);
		if (!buf)
			return -ENOMEM;
		ret = -EOPNOTSUPP;
		switch (file->f_mode & MTD_MODE_MASK) {
		case MTD_MODE_OTP_FACT:
			if (mtd->get_fact_prot_info)
				ret = mtd->get_fact_prot_info(mtd, buf, 4096);
			break;
		case MTD_MODE_OTP_USER:
			if (mtd->get_user_prot_info)
				ret = mtd->get_user_prot_info(mtd, buf, 4096);
			break;
		}
		if (ret >= 0) {
			if (cmd == OTPGETREGIONCOUNT) {
				int nbr = ret / sizeof(struct otp_info);
				ret = copy_to_user(argp, &nbr, sizeof(int));
			} else
				ret = copy_to_user(argp, buf, ret);
			if (ret)
				ret = -EFAULT;
		}
		kfree(buf);
		break;
	}

	case OTPLOCK:
	{
		struct otp_info info;

		if ((file->f_mode & MTD_MODE_MASK) != MTD_MODE_OTP_USER)
			return -EINVAL;
		if (copy_from_user(&info, argp, sizeof(info)))
			return -EFAULT;
		if (!mtd->lock_user_prot_reg)
			return -EOPNOTSUPP;
		ret = mtd->lock_user_prot_reg(mtd, info.start, info.length);
		break;
	}
#endif

	default:
		ret = -ENOTTY;
	}

	return ret;
} /* memory_ioctl */

static struct file_operations mtd_fops = {
	.owner		= THIS_MODULE,
	.llseek		= mtd_lseek,
	.read		= mtd_read,
	.write		= mtd_write,
	.ioctl		= mtd_ioctl,
	.open		= mtd_open,
	.release	= mtd_close,
};

static int __init init_mtdchar(void)
{
	if (register_chrdev(MTD_CHAR_MAJOR, "mtd", &mtd_fops)) {
		printk(KERN_NOTICE "Can't allocate major number %d for Memory Technology Devices.\n",
		       MTD_CHAR_MAJOR);
		return -EAGAIN;
	}

	mtdchar_devfs_init();
	return 0;
}

static void __exit cleanup_mtdchar(void)
{
	mtdchar_devfs_exit();
	unregister_chrdev(MTD_CHAR_MAJOR, "mtd");
}

module_init(init_mtdchar);
module_exit(cleanup_mtdchar);


MODULE_LICENSE("GPL");
MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org>");
MODULE_DESCRIPTION("Direct character-device access to MTD devices");