/*
 * Copyright © 1999-2010 David Woodhouse <dwmw2@infradead.org> et al.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 *
 */

#ifndef __MTD_MTD_H__
#define __MTD_MTD_H__

#include <linux/types.h>
#include <linux/uio.h>
#include <linux/notifier.h>
#include <linux/device.h>

#include <mtd/mtd-abi.h>

#include <asm/div64.h>

#define MTD_ERASE_PENDING	0x01
#define MTD_ERASING		0x02
#define MTD_ERASE_SUSPEND	0x04
#define MTD_ERASE_DONE		0x08
#define MTD_ERASE_FAILED	0x10

#define MTD_FAIL_ADDR_UNKNOWN	-1LL

/*
 * If the erase fails, fail_addr might indicate exactly which block failed. If
 * fail_addr = MTD_FAIL_ADDR_UNKNOWN, the failure was not at the device level
 * or was not specific to any particular block.
 */
struct erase_info {
	struct mtd_info *mtd;
	uint64_t addr;
	uint64_t len;
	uint64_t fail_addr;
	u_long time;
	u_long retries;
	unsigned dev;
	unsigned cell;
	void (*callback) (struct erase_info *self);
	u_long priv;
	u_char state;
	struct erase_info *next;
};
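
/*
 * Illustrative sketch (not part of the original header): how a caller might
 * drive an erase through the mtd_erase() wrapper declared below and wait for
 * the asynchronous completion callback, assuming <linux/completion.h>. The
 * names example_erase_done(), example_erase_block() and block_ofs are
 * hypothetical.
 *
 *	static void example_erase_done(struct erase_info *instr)
 *	{
 *		complete((struct completion *)instr->priv);
 *	}
 *
 *	static int example_erase_block(struct mtd_info *mtd, loff_t block_ofs)
 *	{
 *		struct completion done;
 *		struct erase_info ei = { };
 *		int ret;
 *
 *		init_completion(&done);
 *		ei.mtd = mtd;
 *		ei.addr = block_ofs;		// must be eraseblock aligned
 *		ei.len = mtd->erasesize;
 *		ei.callback = example_erase_done;
 *		ei.priv = (u_long)&done;
 *
 *		ret = mtd_erase(mtd, &ei);
 *		if (ret)
 *			return ret;
 *		wait_for_completion(&done);
 *		if (ei.state == MTD_ERASE_FAILED)
 *			return -EIO;	// ei.fail_addr may name the bad block
 *		return 0;
 *	}
 */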

struct mtd_erase_region_info {
	uint64_t offset;		/* At which this region starts, from the beginning of the MTD */
	uint32_t erasesize;		/* For this region */
	uint32_t numblocks;		/* Number of blocks of erasesize in this region */
	unsigned long *lockmap;		/* If keeping bitmap of locks */
};
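
/*
 * Illustrative sketch (not part of the original header): looking up the erase
 * size that applies at a given offset on a device with variable erase regions
 * (struct mtd_info below carries numeraseregions/eraseregions). The helper
 * name example_erasesize_at() is hypothetical.
 *
 *	static uint32_t example_erasesize_at(struct mtd_info *mtd, uint64_t ofs)
 *	{
 *		int i;
 *
 *		for (i = 0; i < mtd->numeraseregions; i++) {
 *			struct mtd_erase_region_info *r = &mtd->eraseregions[i];
 *			uint64_t end = r->offset +
 *				       (uint64_t)r->erasesize * r->numblocks;
 *
 *			if (ofs >= r->offset && ofs < end)
 *				return r->erasesize;
 *		}
 *		return mtd->erasesize;	// uniform device: single erase size
 *	}
 */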
66
Thomas Gleixner8593fbc2006-05-29 03:26:58 +020067/**
68 * struct mtd_oob_ops - oob operation operands
69 * @mode: operation mode
70 *
Vitaly Wool70145682006-11-03 18:20:38 +030071 * @len: number of data bytes to write/read
Thomas Gleixner8593fbc2006-05-29 03:26:58 +020072 *
Vitaly Wool70145682006-11-03 18:20:38 +030073 * @retlen: number of data bytes written/read
Thomas Gleixner8593fbc2006-05-29 03:26:58 +020074 *
Vitaly Wool70145682006-11-03 18:20:38 +030075 * @ooblen: number of oob bytes to write/read
76 * @oobretlen: number of oob bytes written/read
Thomas Gleixner8593fbc2006-05-29 03:26:58 +020077 * @ooboffs: offset of oob data in the oob area (only relevant when
Brian Norris4180f242011-08-30 18:45:44 -070078 * mode = MTD_OPS_PLACE_OOB or MTD_OPS_RAW)
Thomas Gleixner8593fbc2006-05-29 03:26:58 +020079 * @datbuf: data buffer - if NULL only oob data are read/written
80 * @oobbuf: oob data buffer
Artem Bityutskiy73a44212007-01-31 11:43:13 +020081 *
Frederik Schwarzer025dfda2008-10-16 19:02:37 +020082 * Note, it is allowed to read more than one OOB area at one go, but not write.
Artem Bityutskiy73a44212007-01-31 11:43:13 +020083 * The interface assumes that the OOB write requests program only one page's
84 * OOB area.
Thomas Gleixner8593fbc2006-05-29 03:26:58 +020085 */
86struct mtd_oob_ops {
Brian Norris905c6bc2011-08-30 18:45:39 -070087 unsigned int mode;
Thomas Gleixner8593fbc2006-05-29 03:26:58 +020088 size_t len;
89 size_t retlen;
90 size_t ooblen;
Vitaly Wool70145682006-11-03 18:20:38 +030091 size_t oobretlen;
Thomas Gleixner8593fbc2006-05-29 03:26:58 +020092 uint32_t ooboffs;
93 uint8_t *datbuf;
94 uint8_t *oobbuf;
95};
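
/*
 * Illustrative sketch (not part of the original header): reading the free OOB
 * bytes of one page with mtd_read_oob(), declared later in this header. The
 * variables page_ofs and oob_buf are hypothetical; with MTD_OPS_AUTO_OOB only
 * the oobavail free bytes are transferred and datbuf may be left NULL.
 *
 *	struct mtd_oob_ops ops = { };
 *	int ret;
 *
 *	ops.mode = MTD_OPS_AUTO_OOB;
 *	ops.ooblen = mtd->oobavail;
 *	ops.oobbuf = oob_buf;
 *	ops.datbuf = NULL;		// OOB only, no main-area data
 *	ret = mtd_read_oob(mtd, page_ofs, &ops);
 *	if (!ret)
 *		pr_info("read %zu OOB bytes\n", ops.oobretlen);
 */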

#define MTD_MAX_OOBFREE_ENTRIES_LARGE	32
#define MTD_MAX_ECCPOS_ENTRIES_LARGE	640
/**
 * struct mtd_oob_region - oob region definition
 * @offset: region offset
 * @length: region length
 *
 * This structure describes a region of the OOB area, and is used
 * to retrieve ECC or free bytes sections.
 * Each section is defined by an offset within the OOB area and a
 * length.
 */
struct mtd_oob_region {
	u32 offset;
	u32 length;
};

/**
 * struct mtd_ooblayout_ops - NAND OOB layout operations
 * @ecc: function returning an ECC region in the OOB area.
 *	 Should return -ERANGE if %section exceeds the total number of
 *	 ECC sections.
 * @free: function returning a free region in the OOB area.
 *	  Should return -ERANGE if %section exceeds the total number of
 *	  free sections.
 */
struct mtd_ooblayout_ops {
	int (*ecc)(struct mtd_info *mtd, int section,
		   struct mtd_oob_region *oobecc);
	int (*free)(struct mtd_info *mtd, int section,
		    struct mtd_oob_region *oobfree);
};
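
/*
 * Illustrative sketch (not part of the original header): a minimal
 * mtd_ooblayout_ops implementation exposing a single free region and a single
 * ECC region. The layout (2 free bytes after the bad-block markers, ECC from
 * byte 8 to the end) and all example_* names are hypothetical.
 *
 *	static int example_ooblayout_ecc(struct mtd_info *mtd, int section,
 *					 struct mtd_oob_region *oobecc)
 *	{
 *		if (section)
 *			return -ERANGE;
 *		oobecc->offset = 8;
 *		oobecc->length = mtd->oobsize - 8;
 *		return 0;
 *	}
 *
 *	static int example_ooblayout_free(struct mtd_info *mtd, int section,
 *					  struct mtd_oob_region *oobfree)
 *	{
 *		if (section)
 *			return -ERANGE;
 *		oobfree->offset = 2;	// skip the bad-block marker bytes
 *		oobfree->length = 6;
 *		return 0;
 *	}
 *
 *	static const struct mtd_ooblayout_ops example_ooblayout_ops = {
 *		.ecc = example_ooblayout_ecc,
 *		.free = example_ooblayout_free,
 *	};
 *
 * A driver would then install it with mtd_set_ooblayout(mtd,
 * &example_ooblayout_ops) before registering the device.
 */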

/**
 * struct mtd_pairing_info - page pairing information
 *
 * @pair: pair id
 * @group: group id
 *
 * The term "pair" is used here, even though TLC NANDs might group pages by 3
 * (3 bits in a single cell). A pair should regroup all pages that are sharing
 * the same cell. Pairs are then indexed in ascending order.
 *
 * @group defines the position of a page in a given pair. It can also be
 * seen as the bit position in the cell: a page attached to bit 0 belongs to
 * group 0, a page attached to bit 1 belongs to group 1, etc.
 *
 * Example:
 * The H27UCG8T2BTR-BC datasheet describes the following pairing scheme:
 *
 *		group-0		group-1
 *
 *  pair-0	page-0		page-4
 *  pair-1	page-1		page-5
 *  pair-2	page-2		page-8
 *  ...
 *  pair-127	page-251	page-255
 *
 *
 * Note that the "group" and "pair" terms were extracted from Samsung and
 * Hynix datasheets, and might be referenced under other names in other
 * datasheets (Micron describes this concept as "shared pages").
 */
struct mtd_pairing_info {
	int pair;
	int group;
};

/**
 * struct mtd_pairing_scheme - page pairing scheme description
 *
 * @ngroups: number of groups. Should be related to the number of bits
 *	     per cell.
 * @get_info: converts a write-unit (page number within an erase block) into
 *	      mtd_pairing information (pair + group). This function should
 *	      fill the info parameter based on the wunit index or return
 *	      -EINVAL if the wunit parameter is invalid.
 * @get_wunit: converts pairing information into a write-unit (page) number.
 *	       This function should return the wunit index pointed to by the
 *	       pairing information described in the info argument. It should
 *	       return -EINVAL if there's no wunit corresponding to the
 *	       passed pairing information.
 *
 * See mtd_pairing_info documentation for a detailed explanation of the
 * pair and group concepts.
 *
 * The mtd_pairing_scheme structure provides a generic solution to represent
 * NAND page pairing schemes. Instead of exposing two big tables to do the
 * write-unit <-> (pair + group) conversions, we ask the MTD drivers to
 * implement the ->get_info() and ->get_wunit() functions.
 *
 * MTD users will then be able to query this information by using the
 * mtd_pairing_info_to_wunit() and mtd_wunit_to_pairing_info() helpers.
 *
 * @ngroups is here to help MTD users iterate over all the pages in a
 * given pair. This value can be retrieved by MTD users using the
 * mtd_pairing_groups() helper.
 *
 * Examples are given in the mtd_pairing_info_to_wunit() and
 * mtd_wunit_to_pairing_info() documentation.
 */
struct mtd_pairing_scheme {
	int ngroups;
	int (*get_info)(struct mtd_info *mtd, int wunit,
			struct mtd_pairing_info *info);
	int (*get_wunit)(struct mtd_info *mtd,
			 const struct mtd_pairing_info *info);
};
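
/*
 * Illustrative sketch (not part of the original header): the simplest possible
 * pairing scheme, where every write-unit forms its own pair (ngroups = 1), as
 * an SLC-like device would describe it. All example_* names are hypothetical;
 * mtd_wunit_per_eb() is defined later in this header.
 *
 *	static int example_get_info(struct mtd_info *mtd, int wunit,
 *				    struct mtd_pairing_info *info)
 *	{
 *		if (wunit < 0 || wunit >= mtd_wunit_per_eb(mtd))
 *			return -EINVAL;
 *		info->pair = wunit;
 *		info->group = 0;
 *		return 0;
 *	}
 *
 *	static int example_get_wunit(struct mtd_info *mtd,
 *				     const struct mtd_pairing_info *info)
 *	{
 *		if (info->group != 0 ||
 *		    info->pair < 0 || info->pair >= mtd_wunit_per_eb(mtd))
 *			return -EINVAL;
 *		return info->pair;
 *	}
 *
 *	static const struct mtd_pairing_scheme example_pairing = {
 *		.ngroups = 1,
 *		.get_info = example_get_info,
 *		.get_wunit = example_get_wunit,
 *	};
 */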

struct module;	/* only needed for owner field in mtd_info */

struct mtd_info {
	u_char type;
	uint32_t flags;
	uint64_t size;	 // Total size of the MTD

	/* "Major" erase size for the device. Naïve users may take this
	 * to be the only erase size available, or may use the more detailed
	 * information below if they desire
	 */
	uint32_t erasesize;
	/* Minimal writable flash unit size. In case of NOR flash it is 1 (even
	 * though individual bits can be cleared), in case of NAND flash it is
	 * one NAND page (or half, or one-fourth of it), in case of ECC-ed NOR
	 * it is the ECC block size, etc. It is illegal to have writesize = 0.
	 * Any driver registering a struct mtd_info must ensure a writesize of
	 * 1 or larger.
	 */
	uint32_t writesize;

	/*
	 * Size of the write buffer used by the MTD. MTD devices having a write
	 * buffer can write multiple writesize chunks at a time. E.g. while
	 * writing 4 * writesize bytes to a device with 2 * writesize bytes
	 * buffer the MTD driver can (but doesn't have to) do 2 writesize
	 * operations, but not 4. Currently, all NANDs have writebufsize
	 * equivalent to writesize (NAND page size). Some NOR flashes do have
	 * writebufsize greater than writesize.
	 */
	uint32_t writebufsize;

	uint32_t oobsize;   // Amount of OOB data per block (e.g. 16)
	uint32_t oobavail;  // Available OOB bytes per block

	/*
	 * If erasesize is a power of 2 then the shift is stored in
	 * erasesize_shift otherwise erasesize_shift is zero. Ditto writesize.
	 */
	unsigned int erasesize_shift;
	unsigned int writesize_shift;
	/* Masks based on erasesize_shift and writesize_shift */
	unsigned int erasesize_mask;
	unsigned int writesize_mask;

	/*
	 * read ops return -EUCLEAN if max number of bitflips corrected on any
	 * one region comprising an ecc step equals or exceeds this value.
	 * Settable by driver, else defaults to ecc_strength. User can override
	 * in sysfs. N.B. The meaning of the -EUCLEAN return code has changed;
	 * see Documentation/ABI/testing/sysfs-class-mtd for more detail.
	 */
	unsigned int bitflip_threshold;

	// Kernel-only stuff starts here.
	const char *name;
	int index;

	/* OOB layout description */
	const struct mtd_ooblayout_ops *ooblayout;

	/* NAND pairing scheme, only provided for MLC/TLC NANDs */
	const struct mtd_pairing_scheme *pairing;

	/* the ecc step size. */
	unsigned int ecc_step_size;

	/* max number of correctable bit errors per ecc step */
	unsigned int ecc_strength;

	/* Data for variable erase regions. If numeraseregions is zero,
	 * it means that the whole device has erasesize as given above.
	 */
	int numeraseregions;
	struct mtd_erase_region_info *eraseregions;

	/*
	 * Do not call via these pointers, use corresponding mtd_*()
	 * wrappers instead.
	 */
	int (*_erase) (struct mtd_info *mtd, struct erase_info *instr);
	int (*_point) (struct mtd_info *mtd, loff_t from, size_t len,
		       size_t *retlen, void **virt, resource_size_t *phys);
	int (*_unpoint) (struct mtd_info *mtd, loff_t from, size_t len);
	unsigned long (*_get_unmapped_area) (struct mtd_info *mtd,
					     unsigned long len,
					     unsigned long offset,
					     unsigned long flags);
	int (*_read) (struct mtd_info *mtd, loff_t from, size_t len,
		      size_t *retlen, u_char *buf);
	int (*_write) (struct mtd_info *mtd, loff_t to, size_t len,
		       size_t *retlen, const u_char *buf);
	int (*_panic_write) (struct mtd_info *mtd, loff_t to, size_t len,
			     size_t *retlen, const u_char *buf);
	int (*_read_oob) (struct mtd_info *mtd, loff_t from,
			  struct mtd_oob_ops *ops);
	int (*_write_oob) (struct mtd_info *mtd, loff_t to,
			   struct mtd_oob_ops *ops);
	int (*_get_fact_prot_info) (struct mtd_info *mtd, size_t len,
				    size_t *retlen, struct otp_info *buf);
	int (*_read_fact_prot_reg) (struct mtd_info *mtd, loff_t from,
				    size_t len, size_t *retlen, u_char *buf);
	int (*_get_user_prot_info) (struct mtd_info *mtd, size_t len,
				    size_t *retlen, struct otp_info *buf);
	int (*_read_user_prot_reg) (struct mtd_info *mtd, loff_t from,
				    size_t len, size_t *retlen, u_char *buf);
	int (*_write_user_prot_reg) (struct mtd_info *mtd, loff_t to,
				     size_t len, size_t *retlen, u_char *buf);
	int (*_lock_user_prot_reg) (struct mtd_info *mtd, loff_t from,
				    size_t len);
	int (*_writev) (struct mtd_info *mtd, const struct kvec *vecs,
			unsigned long count, loff_t to, size_t *retlen);
	void (*_sync) (struct mtd_info *mtd);
	int (*_lock) (struct mtd_info *mtd, loff_t ofs, uint64_t len);
	int (*_unlock) (struct mtd_info *mtd, loff_t ofs, uint64_t len);
	int (*_is_locked) (struct mtd_info *mtd, loff_t ofs, uint64_t len);
	int (*_block_isreserved) (struct mtd_info *mtd, loff_t ofs);
	int (*_block_isbad) (struct mtd_info *mtd, loff_t ofs);
	int (*_block_markbad) (struct mtd_info *mtd, loff_t ofs);
	int (*_suspend) (struct mtd_info *mtd);
	void (*_resume) (struct mtd_info *mtd);
	void (*_reboot) (struct mtd_info *mtd);
	/*
	 * If the driver is something smart, like UBI, it may need to maintain
	 * its own reference counting. The functions below are only for such
	 * drivers.
	 */
	int (*_get_device) (struct mtd_info *mtd);
	void (*_put_device) (struct mtd_info *mtd);

	/* Backing device capabilities for this device
	 * - provides mmap capabilities
	 */
	struct backing_dev_info *backing_dev_info;

	struct notifier_block reboot_notifier;	/* default mode before reboot */

	/* ECC status information */
	struct mtd_ecc_stats ecc_stats;
	/* Subpage shift (NAND) */
	int subpage_sft;

	void *priv;

	struct module *owner;
	struct device dev;
	int usecount;
};

int mtd_ooblayout_ecc(struct mtd_info *mtd, int section,
		      struct mtd_oob_region *oobecc);
int mtd_ooblayout_find_eccregion(struct mtd_info *mtd, int eccbyte,
				 int *section,
				 struct mtd_oob_region *oobregion);
int mtd_ooblayout_get_eccbytes(struct mtd_info *mtd, u8 *eccbuf,
			       const u8 *oobbuf, int start, int nbytes);
int mtd_ooblayout_set_eccbytes(struct mtd_info *mtd, const u8 *eccbuf,
			       u8 *oobbuf, int start, int nbytes);
int mtd_ooblayout_free(struct mtd_info *mtd, int section,
		       struct mtd_oob_region *oobfree);
int mtd_ooblayout_get_databytes(struct mtd_info *mtd, u8 *databuf,
				const u8 *oobbuf, int start, int nbytes);
int mtd_ooblayout_set_databytes(struct mtd_info *mtd, const u8 *databuf,
				u8 *oobbuf, int start, int nbytes);
int mtd_ooblayout_count_freebytes(struct mtd_info *mtd);
int mtd_ooblayout_count_eccbytes(struct mtd_info *mtd);

static inline void mtd_set_ooblayout(struct mtd_info *mtd,
				     const struct mtd_ooblayout_ops *ooblayout)
{
	mtd->ooblayout = ooblayout;
}

static inline void mtd_set_pairing_scheme(struct mtd_info *mtd,
					  const struct mtd_pairing_scheme *pairing)
{
	mtd->pairing = pairing;
}

static inline void mtd_set_of_node(struct mtd_info *mtd,
				   struct device_node *np)
{
	mtd->dev.of_node = np;
}

static inline struct device_node *mtd_get_of_node(struct mtd_info *mtd)
{
	return mtd->dev.of_node;
}

static inline int mtd_oobavail(struct mtd_info *mtd, struct mtd_oob_ops *ops)
{
	return ops->mode == MTD_OPS_AUTO_OOB ? mtd->oobavail : mtd->oobsize;
}

int mtd_wunit_to_pairing_info(struct mtd_info *mtd, int wunit,
			      struct mtd_pairing_info *info);
int mtd_pairing_info_to_wunit(struct mtd_info *mtd,
			      const struct mtd_pairing_info *info);
int mtd_pairing_groups(struct mtd_info *mtd);
int mtd_erase(struct mtd_info *mtd, struct erase_info *instr);
int mtd_point(struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen,
	      void **virt, resource_size_t *phys);
int mtd_unpoint(struct mtd_info *mtd, loff_t from, size_t len);
unsigned long mtd_get_unmapped_area(struct mtd_info *mtd, unsigned long len,
				    unsigned long offset, unsigned long flags);
int mtd_read(struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen,
	     u_char *buf);
int mtd_write(struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen,
	      const u_char *buf);
int mtd_panic_write(struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen,
		    const u_char *buf);

int mtd_read_oob(struct mtd_info *mtd, loff_t from, struct mtd_oob_ops *ops);
int mtd_write_oob(struct mtd_info *mtd, loff_t to, struct mtd_oob_ops *ops);

int mtd_get_fact_prot_info(struct mtd_info *mtd, size_t len, size_t *retlen,
			   struct otp_info *buf);
int mtd_read_fact_prot_reg(struct mtd_info *mtd, loff_t from, size_t len,
			   size_t *retlen, u_char *buf);
int mtd_get_user_prot_info(struct mtd_info *mtd, size_t len, size_t *retlen,
			   struct otp_info *buf);
int mtd_read_user_prot_reg(struct mtd_info *mtd, loff_t from, size_t len,
			   size_t *retlen, u_char *buf);
int mtd_write_user_prot_reg(struct mtd_info *mtd, loff_t to, size_t len,
			    size_t *retlen, u_char *buf);
int mtd_lock_user_prot_reg(struct mtd_info *mtd, loff_t from, size_t len);

int mtd_writev(struct mtd_info *mtd, const struct kvec *vecs,
	       unsigned long count, loff_t to, size_t *retlen);

static inline void mtd_sync(struct mtd_info *mtd)
{
	if (mtd->_sync)
		mtd->_sync(mtd);
}

int mtd_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
int mtd_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
int mtd_is_locked(struct mtd_info *mtd, loff_t ofs, uint64_t len);
int mtd_block_isreserved(struct mtd_info *mtd, loff_t ofs);
int mtd_block_isbad(struct mtd_info *mtd, loff_t ofs);
int mtd_block_markbad(struct mtd_info *mtd, loff_t ofs);

static inline int mtd_suspend(struct mtd_info *mtd)
{
	return mtd->_suspend ? mtd->_suspend(mtd) : 0;
}

static inline void mtd_resume(struct mtd_info *mtd)
{
	if (mtd->_resume)
		mtd->_resume(mtd);
}

static inline uint32_t mtd_div_by_eb(uint64_t sz, struct mtd_info *mtd)
{
	if (mtd->erasesize_shift)
		return sz >> mtd->erasesize_shift;
	do_div(sz, mtd->erasesize);
	return sz;
}

static inline uint32_t mtd_mod_by_eb(uint64_t sz, struct mtd_info *mtd)
{
	if (mtd->erasesize_shift)
		return sz & mtd->erasesize_mask;
	return do_div(sz, mtd->erasesize);
}

static inline uint32_t mtd_div_by_ws(uint64_t sz, struct mtd_info *mtd)
{
	if (mtd->writesize_shift)
		return sz >> mtd->writesize_shift;
	do_div(sz, mtd->writesize);
	return sz;
}

static inline uint32_t mtd_mod_by_ws(uint64_t sz, struct mtd_info *mtd)
{
	if (mtd->writesize_shift)
		return sz & mtd->writesize_mask;
	return do_div(sz, mtd->writesize);
}

static inline int mtd_wunit_per_eb(struct mtd_info *mtd)
{
	return mtd->erasesize / mtd->writesize;
}

static inline int mtd_offset_to_wunit(struct mtd_info *mtd, loff_t offs)
{
	return mtd_div_by_ws(mtd_mod_by_eb(offs, mtd), mtd);
}

static inline loff_t mtd_wunit_to_offset(struct mtd_info *mtd, loff_t base,
					 int wunit)
{
	return base + (wunit * mtd->writesize);
}
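
/*
 * Illustrative sketch (not part of the original header): converting an
 * absolute offset into an (eraseblock, write-unit) pair and back with the
 * helpers above. The variables offs, eb, base and page are hypothetical.
 *
 *	uint32_t eb = mtd_div_by_eb(offs, mtd);		// eraseblock index
 *	loff_t base = (loff_t)eb * mtd->erasesize;	// eraseblock start
 *	int wunit = mtd_offset_to_wunit(mtd, offs);	// page within the block
 *	loff_t page = mtd_wunit_to_offset(mtd, base, wunit);
 *						// == offs rounded down to a page
 */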

static inline int mtd_has_oob(const struct mtd_info *mtd)
{
	return mtd->_read_oob && mtd->_write_oob;
}

static inline int mtd_type_is_nand(const struct mtd_info *mtd)
{
	return mtd->type == MTD_NANDFLASH || mtd->type == MTD_MLCNANDFLASH;
}

static inline int mtd_can_have_bb(const struct mtd_info *mtd)
{
	return !!mtd->_block_isbad;
}

/* Kernel-side ioctl definitions */

struct mtd_partition;
struct mtd_part_parser_data;

extern int mtd_device_parse_register(struct mtd_info *mtd,
				     const char * const *part_probe_types,
				     struct mtd_part_parser_data *parser_data,
				     const struct mtd_partition *defparts,
				     int defnr_parts);
#define mtd_device_register(master, parts, nr_parts)	\
	mtd_device_parse_register(master, NULL, NULL, parts, nr_parts)
extern int mtd_device_unregister(struct mtd_info *master);
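
/*
 * Illustrative sketch (not part of the original header): how a flash driver
 * typically registers its mtd_info once the chip has been probed, optionally
 * letting partition parsers run first. The probe list and example_parts[]
 * array are hypothetical.
 *
 *	static const char * const probes[] = { "cmdlinepart", "ofpart", NULL };
 *
 *	ret = mtd_device_parse_register(mtd, probes, NULL,
 *					example_parts,
 *					ARRAY_SIZE(example_parts));
 *	if (ret)
 *		return ret;
 *	...
 *	// on driver removal:
 *	mtd_device_unregister(mtd);
 */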
extern struct mtd_info *get_mtd_device(struct mtd_info *mtd, int num);
extern int __get_mtd_device(struct mtd_info *mtd);
extern void __put_mtd_device(struct mtd_info *mtd);
extern struct mtd_info *get_mtd_device_nm(const char *name);
extern void put_mtd_device(struct mtd_info *mtd);


struct mtd_notifier {
	void (*add)(struct mtd_info *mtd);
	void (*remove)(struct mtd_info *mtd);
	struct list_head list;
};


extern void register_mtd_user (struct mtd_notifier *new);
extern int unregister_mtd_user (struct mtd_notifier *old);
void *mtd_kmalloc_up_to(const struct mtd_info *mtd, size_t *size);

void mtd_erase_callback(struct erase_info *instr);

static inline int mtd_is_bitflip(int err) {
	return err == -EUCLEAN;
}

static inline int mtd_is_eccerr(int err) {
	return err == -EBADMSG;
}

static inline int mtd_is_bitflip_or_eccerr(int err) {
	return mtd_is_bitflip(err) || mtd_is_eccerr(err);
}
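
/*
 * Illustrative sketch (not part of the original header): the usual way callers
 * interpret mtd_read() return codes with the helpers above. -EUCLEAN means the
 * data was corrected (consider scrubbing the block); -EBADMSG means it was
 * not. The variables used are hypothetical.
 *
 *	err = mtd_read(mtd, from, len, &retlen, buf);
 *	if (mtd_is_bitflip(err))
 *		err = 0;	// data in buf is valid, but flag wear
 *	else if (mtd_is_eccerr(err))
 *		pr_err("uncorrectable ECC error at %llx\n", (u64)from);
 */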

unsigned mtd_mmap_capabilities(struct mtd_info *mtd);

#endif /* __MTD_MTD_H__ */