/*
 * Character-device access to raw MTD devices.
 *
 */

#include <linux/device.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/smp_lock.h>
#include <linux/backing-dev.h>

#include <linux/mtd/mtd.h>
#include <linux/mtd/compatmac.h>

#include <asm/uaccess.h>

static struct class *mtd_class;

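/*
 * MTD notifier hooks: create and remove the /dev/mtdX (read-write) and
 * /dev/mtdXro (read-only) character device nodes as MTD devices come and go.
 */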
static void mtd_notify_add(struct mtd_info* mtd)
{
        if (!mtd)
                return;

        device_create(mtd_class, NULL, MKDEV(MTD_CHAR_MAJOR, mtd->index*2),
                      NULL, "mtd%d", mtd->index);

        device_create(mtd_class, NULL, MKDEV(MTD_CHAR_MAJOR, mtd->index*2+1),
                      NULL, "mtd%dro", mtd->index);
}

static void mtd_notify_remove(struct mtd_info* mtd)
{
        if (!mtd)
                return;

        device_destroy(mtd_class, MKDEV(MTD_CHAR_MAJOR, mtd->index*2));
        device_destroy(mtd_class, MKDEV(MTD_CHAR_MAJOR, mtd->index*2+1));
}

static struct mtd_notifier notifier = {
        .add    = mtd_notify_add,
        .remove = mtd_notify_remove,
};

/*
 * Data structure to hold the pointer to the mtd device as well
 * as mode information for various use cases.
 */
struct mtd_file_info {
        struct mtd_info *mtd;
        enum mtd_file_modes mode;
};

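/*
 * llseek handler: SEEK_SET, SEEK_CUR and SEEK_END are supported, and the
 * resulting offset must stay within the device size; anything else is -EINVAL.
 */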
static loff_t mtd_lseek (struct file *file, loff_t offset, int orig)
{
        struct mtd_file_info *mfi = file->private_data;
        struct mtd_info *mtd = mfi->mtd;

        switch (orig) {
        case SEEK_SET:
                break;
        case SEEK_CUR:
                offset += file->f_pos;
                break;
        case SEEK_END:
                offset += mtd->size;
                break;
        default:
                return -EINVAL;
        }

        if (offset >= 0 && offset <= mtd->size)
                return file->f_pos = offset;

        return -EINVAL;
}

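/*
 * Open a raw MTD character device. The minor number encodes both the MTD
 * index (minor >> 1) and the read-only flag (odd minors are the "ro" nodes),
 * so a read-only node can never be opened for writing.
 */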
static int mtd_open(struct inode *inode, struct file *file)
{
        int minor = iminor(inode);
        int devnum = minor >> 1;
        int ret = 0;
        struct mtd_info *mtd;
        struct mtd_file_info *mfi;

        DEBUG(MTD_DEBUG_LEVEL0, "MTD_open\n");

        if (devnum >= MAX_MTD_DEVICES)
                return -ENODEV;

        /* You can't open the RO devices RW */
        if ((file->f_mode & FMODE_WRITE) && (minor & 1))
                return -EACCES;

        lock_kernel();
        mtd = get_mtd_device(NULL, devnum);

        if (IS_ERR(mtd)) {
                ret = PTR_ERR(mtd);
                goto out;
        }

        if (mtd->type == MTD_ABSENT) {
                put_mtd_device(mtd);
                ret = -ENODEV;
                goto out;
        }

        if (mtd->backing_dev_info)
                file->f_mapping->backing_dev_info = mtd->backing_dev_info;

        /* You can't open it RW if it's not a writeable device */
        if ((file->f_mode & FMODE_WRITE) && !(mtd->flags & MTD_WRITEABLE)) {
                put_mtd_device(mtd);
                ret = -EACCES;
                goto out;
        }

        mfi = kzalloc(sizeof(*mfi), GFP_KERNEL);
        if (!mfi) {
                put_mtd_device(mtd);
                ret = -ENOMEM;
                goto out;
        }
        mfi->mtd = mtd;
        file->private_data = mfi;

out:
        unlock_kernel();
        return ret;
} /* mtd_open */

/*====================================================================*/

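/* Release handler: sync the device if it was opened for writing, then drop our reference. */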
static int mtd_close(struct inode *inode, struct file *file)
{
        struct mtd_file_info *mfi = file->private_data;
        struct mtd_info *mtd = mfi->mtd;

        DEBUG(MTD_DEBUG_LEVEL0, "MTD_close\n");

        /* Only sync if opened RW */
        if ((file->f_mode & FMODE_WRITE) && mtd->sync)
                mtd->sync(mtd);

        put_mtd_device(mtd);
        file->private_data = NULL;
        kfree(mfi);

        return 0;
} /* mtd_close */

/* FIXME: This _really_ needs to die. In 2.5, we should lock the
   userspace buffer down and use it directly with readv/writev.
*/
#define MAX_KMALLOC_SIZE 0x20000

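/*
 * Read from the device through a bounce buffer of at most MAX_KMALLOC_SIZE
 * bytes, honouring the per-file mode (normal, raw or OTP) selected via the
 * MTDFILEMODE/OTPSELECT ioctls.
 */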
static ssize_t mtd_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
{
        struct mtd_file_info *mfi = file->private_data;
        struct mtd_info *mtd = mfi->mtd;
        size_t retlen = 0;
        size_t total_retlen = 0;
        int ret = 0;
        int len;
        char *kbuf;

        DEBUG(MTD_DEBUG_LEVEL0, "MTD_read\n");

        if (*ppos + count > mtd->size)
                count = mtd->size - *ppos;

        if (!count)
                return 0;

        /* FIXME: Use kiovec in 2.5 to lock down the user's buffers
           and pass them directly to the MTD functions */

        if (count > MAX_KMALLOC_SIZE)
                kbuf = kmalloc(MAX_KMALLOC_SIZE, GFP_KERNEL);
        else
                kbuf = kmalloc(count, GFP_KERNEL);

        if (!kbuf)
                return -ENOMEM;

        while (count) {

                if (count > MAX_KMALLOC_SIZE)
                        len = MAX_KMALLOC_SIZE;
                else
                        len = count;

                switch (mfi->mode) {
                case MTD_MODE_OTP_FACTORY:
                        ret = mtd->read_fact_prot_reg(mtd, *ppos, len, &retlen, kbuf);
                        break;
                case MTD_MODE_OTP_USER:
                        ret = mtd->read_user_prot_reg(mtd, *ppos, len, &retlen, kbuf);
                        break;
                case MTD_MODE_RAW:
                {
                        struct mtd_oob_ops ops;

                        ops.mode = MTD_OOB_RAW;
                        ops.datbuf = kbuf;
                        ops.oobbuf = NULL;
                        ops.len = len;

                        ret = mtd->read_oob(mtd, *ppos, &ops);
                        retlen = ops.retlen;
                        break;
                }
                default:
                        ret = mtd->read(mtd, *ppos, len, &retlen, kbuf);
                }
                /* NAND returns -EBADMSG on ECC errors, but it still returns
                 * the data. For our userspace tools it is important
                 * to dump areas with ECC errors!
                 * For kernel internal usage it also might return -EUCLEAN
                 * to signal the caller that a bitflip has occurred and has
                 * been corrected by the ECC algorithm.
                 * Userspace software which accesses NAND this way
                 * must be aware of the fact that it deals with NAND.
                 */
                if (!ret || (ret == -EUCLEAN) || (ret == -EBADMSG)) {
                        *ppos += retlen;
                        if (copy_to_user(buf, kbuf, retlen)) {
                                kfree(kbuf);
                                return -EFAULT;
                        }
                        else
                                total_retlen += retlen;

                        count -= retlen;
                        buf += retlen;
                        if (retlen == 0)
                                count = 0;
                }
                else {
                        kfree(kbuf);
                        return ret;
                }

        }

        kfree(kbuf);
        return total_retlen;
} /* mtd_read */

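/*
 * Write to the device through the same bounce-buffer scheme as mtd_read();
 * writes in factory-OTP mode always fail with -EROFS.
 */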
static ssize_t mtd_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos)
{
        struct mtd_file_info *mfi = file->private_data;
        struct mtd_info *mtd = mfi->mtd;
        char *kbuf;
        size_t retlen;
        size_t total_retlen = 0;
        int ret = 0;
        int len;

        DEBUG(MTD_DEBUG_LEVEL0, "MTD_write\n");

        if (*ppos == mtd->size)
                return -ENOSPC;

        if (*ppos + count > mtd->size)
                count = mtd->size - *ppos;

        if (!count)
                return 0;

        if (count > MAX_KMALLOC_SIZE)
                kbuf = kmalloc(MAX_KMALLOC_SIZE, GFP_KERNEL);
        else
                kbuf = kmalloc(count, GFP_KERNEL);

        if (!kbuf)
                return -ENOMEM;

        while (count) {

                if (count > MAX_KMALLOC_SIZE)
                        len = MAX_KMALLOC_SIZE;
                else
                        len = count;

                if (copy_from_user(kbuf, buf, len)) {
                        kfree(kbuf);
                        return -EFAULT;
                }

                switch (mfi->mode) {
                case MTD_MODE_OTP_FACTORY:
                        ret = -EROFS;
                        break;
                case MTD_MODE_OTP_USER:
                        if (!mtd->write_user_prot_reg) {
                                ret = -EOPNOTSUPP;
                                break;
                        }
                        ret = mtd->write_user_prot_reg(mtd, *ppos, len, &retlen, kbuf);
                        break;

                case MTD_MODE_RAW:
                {
                        struct mtd_oob_ops ops;

                        ops.mode = MTD_OOB_RAW;
                        ops.datbuf = kbuf;
                        ops.oobbuf = NULL;
                        ops.len = len;

                        ret = mtd->write_oob(mtd, *ppos, &ops);
                        retlen = ops.retlen;
                        break;
                }

                default:
                        ret = (*(mtd->write))(mtd, *ppos, len, &retlen, kbuf);
                }
                if (!ret) {
                        *ppos += retlen;
                        total_retlen += retlen;
                        count -= retlen;
                        buf += retlen;
                }
                else {
                        kfree(kbuf);
                        return ret;
                }
        }

        kfree(kbuf);
        return total_retlen;
} /* mtd_write */

/*======================================================================

    IOCTL calls for getting device parameters.

======================================================================*/
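/* Erase completion callback: wake up the waiter sleeping in the MEMERASE ioctl. */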
static void mtdchar_erase_callback (struct erase_info *instr)
{
        wake_up((wait_queue_head_t *)instr->priv);
}

#ifdef CONFIG_HAVE_MTD_OTP
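/*
 * Switch the per-file mode to one of the OTP (one-time programmable)
 * regions, provided the device supports them.
 */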
static int otp_select_filemode(struct mtd_file_info *mfi, int mode)
{
        struct mtd_info *mtd = mfi->mtd;
        int ret = 0;

        switch (mode) {
        case MTD_OTP_FACTORY:
                if (!mtd->read_fact_prot_reg)
                        ret = -EOPNOTSUPP;
                else
                        mfi->mode = MTD_MODE_OTP_FACTORY;
                break;
        case MTD_OTP_USER:
                if (!mtd->read_fact_prot_reg)
                        ret = -EOPNOTSUPP;
                else
                        mfi->mode = MTD_MODE_OTP_USER;
                break;
        default:
                ret = -EINVAL;
        case MTD_OTP_OFF:
                break;
        }
        return ret;
}
#else
# define otp_select_filemode(f,m)       -EOPNOTSUPP
#endif

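/*
 * ioctl handler: implements the MEMGETINFO, MEMERASE, MEMREADOOB/MEMWRITEOOB,
 * lock/unlock, bad-block, OTP and ECC ioctls exposed to userspace.
 */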
static int mtd_ioctl(struct inode *inode, struct file *file,
        u_int cmd, u_long arg)
{
        struct mtd_file_info *mfi = file->private_data;
        struct mtd_info *mtd = mfi->mtd;
        void __user *argp = (void __user *)arg;
        int ret = 0;
        u_long size;
        struct mtd_info_user info;

        DEBUG(MTD_DEBUG_LEVEL0, "MTD_ioctl\n");

        size = (cmd & IOCSIZE_MASK) >> IOCSIZE_SHIFT;
        if (cmd & IOC_IN) {
                if (!access_ok(VERIFY_READ, argp, size))
                        return -EFAULT;
        }
        if (cmd & IOC_OUT) {
                if (!access_ok(VERIFY_WRITE, argp, size))
                        return -EFAULT;
        }

        switch (cmd) {
        case MEMGETREGIONCOUNT:
                if (copy_to_user(argp, &(mtd->numeraseregions), sizeof(int)))
                        return -EFAULT;
                break;

        case MEMGETREGIONINFO:
        {
                uint32_t ur_idx;
                struct mtd_erase_region_info *kr;
                struct region_info_user *ur = (struct region_info_user *) argp;

                if (get_user(ur_idx, &(ur->regionindex)))
                        return -EFAULT;

                kr = &(mtd->eraseregions[ur_idx]);

                if (put_user(kr->offset, &(ur->offset))
                    || put_user(kr->erasesize, &(ur->erasesize))
                    || put_user(kr->numblocks, &(ur->numblocks)))
                        return -EFAULT;

                break;
        }

        case MEMGETINFO:
                info.type       = mtd->type;
                info.flags      = mtd->flags;
                info.size       = mtd->size;
                info.erasesize  = mtd->erasesize;
                info.writesize  = mtd->writesize;
                info.oobsize    = mtd->oobsize;
                /* The below fields are obsolete */
                info.ecctype    = -1;
                info.eccsize    = 0;
                if (copy_to_user(argp, &info, sizeof(struct mtd_info_user)))
                        return -EFAULT;
                break;

        case MEMERASE:
        {
                struct erase_info *erase;

                if (!(file->f_mode & FMODE_WRITE))
                        return -EPERM;

                erase = kzalloc(sizeof(struct erase_info), GFP_KERNEL);
                if (!erase)
                        ret = -ENOMEM;
                else {
                        struct erase_info_user einfo;

                        wait_queue_head_t waitq;
                        DECLARE_WAITQUEUE(wait, current);

                        init_waitqueue_head(&waitq);

                        if (copy_from_user(&einfo, argp,
                                    sizeof(struct erase_info_user))) {
                                kfree(erase);
                                return -EFAULT;
                        }
                        erase->addr = einfo.start;
                        erase->len = einfo.length;
                        erase->mtd = mtd;
                        erase->callback = mtdchar_erase_callback;
                        erase->priv = (unsigned long)&waitq;

                        /*
                          FIXME: Allow INTERRUPTIBLE. Which means
                          not having the wait_queue head on the stack.

                          If the wq_head is on the stack, and we
                          leave because we got interrupted, then the
                          wq_head is no longer there when the
                          callback routine tries to wake us up.
                        */
                        ret = mtd->erase(mtd, erase);
                        if (!ret) {
                                set_current_state(TASK_UNINTERRUPTIBLE);
                                add_wait_queue(&waitq, &wait);
                                if (erase->state != MTD_ERASE_DONE &&
                                    erase->state != MTD_ERASE_FAILED)
                                        schedule();
                                remove_wait_queue(&waitq, &wait);
                                set_current_state(TASK_RUNNING);

                                ret = (erase->state == MTD_ERASE_FAILED) ? -EIO : 0;
                        }
                        kfree(erase);
                }
                break;
        }

        case MEMWRITEOOB:
        {
                struct mtd_oob_buf buf;
                struct mtd_oob_ops ops;
                struct mtd_oob_buf __user *user_buf = argp;
                uint32_t retlen;

                if (!(file->f_mode & FMODE_WRITE))
                        return -EPERM;

                if (copy_from_user(&buf, argp, sizeof(struct mtd_oob_buf)))
                        return -EFAULT;

                if (buf.length > 4096)
                        return -EINVAL;

                if (!mtd->write_oob)
                        ret = -EOPNOTSUPP;
                else
                        ret = access_ok(VERIFY_READ, buf.ptr,
                                        buf.length) ? 0 : -EFAULT;

                if (ret)
                        return ret;

                ops.ooblen = buf.length;
                ops.ooboffs = buf.start & (mtd->oobsize - 1);
                ops.datbuf = NULL;
                ops.mode = MTD_OOB_PLACE;

                if (ops.ooboffs && ops.ooblen > (mtd->oobsize - ops.ooboffs))
                        return -EINVAL;

                ops.oobbuf = kmalloc(buf.length, GFP_KERNEL);
                if (!ops.oobbuf)
                        return -ENOMEM;

                if (copy_from_user(ops.oobbuf, buf.ptr, buf.length)) {
                        kfree(ops.oobbuf);
                        return -EFAULT;
                }

                buf.start &= ~(mtd->oobsize - 1);
                ret = mtd->write_oob(mtd, buf.start, &ops);

                if (ops.oobretlen > 0xFFFFFFFFU)
                        ret = -EOVERFLOW;
                retlen = ops.oobretlen;
                if (copy_to_user(&user_buf->length, &retlen, sizeof(buf.length)))
                        ret = -EFAULT;

                kfree(ops.oobbuf);
                break;

        }

        case MEMREADOOB:
        {
                struct mtd_oob_buf buf;
                struct mtd_oob_ops ops;

                if (copy_from_user(&buf, argp, sizeof(struct mtd_oob_buf)))
                        return -EFAULT;

                if (buf.length > 4096)
                        return -EINVAL;

                if (!mtd->read_oob)
                        ret = -EOPNOTSUPP;
                else
                        ret = access_ok(VERIFY_WRITE, buf.ptr,
                                        buf.length) ? 0 : -EFAULT;
                if (ret)
                        return ret;

                ops.ooblen = buf.length;
                ops.ooboffs = buf.start & (mtd->oobsize - 1);
                ops.datbuf = NULL;
                ops.mode = MTD_OOB_PLACE;

                if (ops.ooboffs && ops.ooblen > (mtd->oobsize - ops.ooboffs))
                        return -EINVAL;

                ops.oobbuf = kmalloc(buf.length, GFP_KERNEL);
                if (!ops.oobbuf)
                        return -ENOMEM;

                buf.start &= ~(mtd->oobsize - 1);
                ret = mtd->read_oob(mtd, buf.start, &ops);

                if (put_user(ops.oobretlen, (uint32_t __user *)argp))
                        ret = -EFAULT;
                else if (ops.oobretlen && copy_to_user(buf.ptr, ops.oobbuf,
                                                       ops.oobretlen))
                        ret = -EFAULT;

                kfree(ops.oobbuf);
                break;
        }

        case MEMLOCK:
        {
                struct erase_info_user einfo;

                if (copy_from_user(&einfo, argp, sizeof(einfo)))
                        return -EFAULT;

                if (!mtd->lock)
                        ret = -EOPNOTSUPP;
                else
                        ret = mtd->lock(mtd, einfo.start, einfo.length);
                break;
        }

        case MEMUNLOCK:
        {
                struct erase_info_user einfo;

                if (copy_from_user(&einfo, argp, sizeof(einfo)))
                        return -EFAULT;

                if (!mtd->unlock)
                        ret = -EOPNOTSUPP;
                else
                        ret = mtd->unlock(mtd, einfo.start, einfo.length);
                break;
        }

        /* Legacy interface */
        case MEMGETOOBSEL:
        {
                struct nand_oobinfo oi;

                if (!mtd->ecclayout)
                        return -EOPNOTSUPP;
                if (mtd->ecclayout->eccbytes > ARRAY_SIZE(oi.eccpos))
                        return -EINVAL;

                oi.useecc = MTD_NANDECC_AUTOPLACE;
                memcpy(&oi.eccpos, mtd->ecclayout->eccpos, sizeof(oi.eccpos));
                memcpy(&oi.oobfree, mtd->ecclayout->oobfree,
                       sizeof(oi.oobfree));
                oi.eccbytes = mtd->ecclayout->eccbytes;

                if (copy_to_user(argp, &oi, sizeof(struct nand_oobinfo)))
                        return -EFAULT;
                break;
        }

        case MEMGETBADBLOCK:
        {
                loff_t offs;

                if (copy_from_user(&offs, argp, sizeof(loff_t)))
                        return -EFAULT;
                if (!mtd->block_isbad)
                        ret = -EOPNOTSUPP;
                else
                        return mtd->block_isbad(mtd, offs);
                break;
        }

        case MEMSETBADBLOCK:
        {
                loff_t offs;

                if (copy_from_user(&offs, argp, sizeof(loff_t)))
                        return -EFAULT;
                if (!mtd->block_markbad)
                        ret = -EOPNOTSUPP;
                else
                        return mtd->block_markbad(mtd, offs);
                break;
        }

#ifdef CONFIG_HAVE_MTD_OTP
        case OTPSELECT:
        {
                int mode;
                if (copy_from_user(&mode, argp, sizeof(int)))
                        return -EFAULT;

                mfi->mode = MTD_MODE_NORMAL;

                ret = otp_select_filemode(mfi, mode);

                file->f_pos = 0;
                break;
        }

        case OTPGETREGIONCOUNT:
        case OTPGETREGIONINFO:
        {
                struct otp_info *buf = kmalloc(4096, GFP_KERNEL);
                if (!buf)
                        return -ENOMEM;
                ret = -EOPNOTSUPP;
                switch (mfi->mode) {
                case MTD_MODE_OTP_FACTORY:
                        if (mtd->get_fact_prot_info)
                                ret = mtd->get_fact_prot_info(mtd, buf, 4096);
                        break;
                case MTD_MODE_OTP_USER:
                        if (mtd->get_user_prot_info)
                                ret = mtd->get_user_prot_info(mtd, buf, 4096);
                        break;
                default:
                        break;
                }
                if (ret >= 0) {
                        if (cmd == OTPGETREGIONCOUNT) {
                                int nbr = ret / sizeof(struct otp_info);
                                ret = copy_to_user(argp, &nbr, sizeof(int));
                        } else
                                ret = copy_to_user(argp, buf, ret);
                        if (ret)
                                ret = -EFAULT;
                }
                kfree(buf);
                break;
        }

        case OTPLOCK:
        {
                struct otp_info oinfo;

                if (mfi->mode != MTD_MODE_OTP_USER)
                        return -EINVAL;
                if (copy_from_user(&oinfo, argp, sizeof(oinfo)))
                        return -EFAULT;
                if (!mtd->lock_user_prot_reg)
                        return -EOPNOTSUPP;
                ret = mtd->lock_user_prot_reg(mtd, oinfo.start, oinfo.length);
                break;
        }
#endif

        case ECCGETLAYOUT:
        {
                if (!mtd->ecclayout)
                        return -EOPNOTSUPP;

                if (copy_to_user(argp, mtd->ecclayout,
                                 sizeof(struct nand_ecclayout)))
                        return -EFAULT;
                break;
        }

        case ECCGETSTATS:
        {
                if (copy_to_user(argp, &mtd->ecc_stats,
                                 sizeof(struct mtd_ecc_stats)))
                        return -EFAULT;
                break;
        }

        case MTDFILEMODE:
        {
                mfi->mode = 0;

                switch (arg) {
                case MTD_MODE_OTP_FACTORY:
                case MTD_MODE_OTP_USER:
                        ret = otp_select_filemode(mfi, arg);
                        break;

                case MTD_MODE_RAW:
                        if (!mtd->read_oob || !mtd->write_oob)
                                return -EOPNOTSUPP;
                        mfi->mode = arg;

                case MTD_MODE_NORMAL:
                        break;
                default:
                        ret = -EINVAL;
                }
                file->f_pos = 0;
                break;
        }

        default:
                ret = -ENOTTY;
        }

        return ret;
} /* mtd_ioctl */

/*
 * try to determine where a shared mapping can be made
 * - only supported for NOMMU at the moment (an MMU doesn't copy private
 *   mappings)
 */
#ifndef CONFIG_MMU
static unsigned long mtd_get_unmapped_area(struct file *file,
                                           unsigned long addr,
                                           unsigned long len,
                                           unsigned long pgoff,
                                           unsigned long flags)
{
        struct mtd_file_info *mfi = file->private_data;
        struct mtd_info *mtd = mfi->mtd;

        if (mtd->get_unmapped_area) {
                unsigned long offset;

                if (addr != 0)
                        return (unsigned long) -EINVAL;

                if (len > mtd->size || pgoff >= (mtd->size >> PAGE_SHIFT))
                        return (unsigned long) -EINVAL;

                offset = pgoff << PAGE_SHIFT;
                if (offset > mtd->size - len)
                        return (unsigned long) -EINVAL;

                return mtd->get_unmapped_area(mtd, len, offset, flags);
        }

        /* can't map directly */
        return (unsigned long) -ENOSYS;
}
#endif

/*
 * set up a mapping for shared memory segments
 */
static int mtd_mmap(struct file *file, struct vm_area_struct *vma)
{
#ifdef CONFIG_MMU
        struct mtd_file_info *mfi = file->private_data;
        struct mtd_info *mtd = mfi->mtd;

        if (mtd->type == MTD_RAM || mtd->type == MTD_ROM)
                return 0;
        return -ENOSYS;
#else
        return vma->vm_flags & VM_SHARED ? 0 : -ENOSYS;
#endif
}

static const struct file_operations mtd_fops = {
        .owner          = THIS_MODULE,
        .llseek         = mtd_lseek,
        .read           = mtd_read,
        .write          = mtd_write,
        .ioctl          = mtd_ioctl,
        .open           = mtd_open,
        .release        = mtd_close,
        .mmap           = mtd_mmap,
#ifndef CONFIG_MMU
        .get_unmapped_area = mtd_get_unmapped_area,
#endif
};

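/* Register the MTD character device major, the "mtd" class and the MTD add/remove notifier. */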
static int __init init_mtdchar(void)
{
        if (register_chrdev(MTD_CHAR_MAJOR, "mtd", &mtd_fops)) {
                printk(KERN_NOTICE "Can't allocate major number %d for Memory Technology Devices.\n",
                       MTD_CHAR_MAJOR);
                return -EAGAIN;
        }

        mtd_class = class_create(THIS_MODULE, "mtd");

        if (IS_ERR(mtd_class)) {
                printk(KERN_ERR "Error creating mtd class.\n");
                unregister_chrdev(MTD_CHAR_MAJOR, "mtd");
                return PTR_ERR(mtd_class);
        }

        register_mtd_user(&notifier);
        return 0;
}

static void __exit cleanup_mtdchar(void)
{
        unregister_mtd_user(&notifier);
        class_destroy(mtd_class);
        unregister_chrdev(MTD_CHAR_MAJOR, "mtd");
}

module_init(init_mtdchar);
module_exit(cleanup_mtdchar);


MODULE_LICENSE("GPL");
MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org>");
MODULE_DESCRIPTION("Direct character-device access to MTD devices");
MODULE_ALIAS_CHARDEV_MAJOR(MTD_CHAR_MAJOR);