#ifndef NVM_H
#define NVM_H

#include <linux/types.h>

enum {
	NVM_IO_OK = 0,
	NVM_IO_REQUEUE = 1,
	NVM_IO_DONE = 2,
	NVM_IO_ERR = 3,

	NVM_IOTYPE_NONE = 0,
	NVM_IOTYPE_GC = 1,
};

#define NVM_BLK_BITS (16)
#define NVM_PG_BITS  (16)
#define NVM_SEC_BITS (8)
#define NVM_PL_BITS  (8)
#define NVM_LUN_BITS (8)
#define NVM_CH_BITS  (8)

struct ppa_addr {
	/* Generic structure for all addresses */
	union {
		struct {
			u64 blk		: NVM_BLK_BITS;
			u64 pg		: NVM_PG_BITS;
			u64 sec		: NVM_SEC_BITS;
			u64 pl		: NVM_PL_BITS;
			u64 lun		: NVM_LUN_BITS;
			u64 ch		: NVM_CH_BITS;
		} g;

		u64 ppa;
	};
};
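
/*
 * Illustrative sketch (not part of the ABI): because the bitfield struct
 * and the u64 share storage, an address can be built per-field and then
 * cleared or compared as a single integer:
 *
 *	struct ppa_addr p, q;
 *
 *	p.ppa = 0;		// clears every field at once
 *	p.g.ch = 1;		// channel 1
 *	p.g.lun = 2;		// LUN 2 on that channel
 *	p.g.blk = 100;		// block 100 within the LUN
 *	if (p.ppa == q.ppa)	// whole-address compare via the union
 *		...
 */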

struct nvm_rq;
struct nvm_id;
struct nvm_dev;

typedef int (nvm_l2p_update_fn)(u64, u32, __le64 *, void *);
typedef int (nvm_bb_update_fn)(struct ppa_addr, int, u8 *, void *);
typedef int (nvm_id_fn)(struct nvm_dev *, struct nvm_id *);
typedef int (nvm_get_l2p_tbl_fn)(struct nvm_dev *, u64, u32,
				nvm_l2p_update_fn *, void *);
typedef int (nvm_op_bb_tbl_fn)(struct nvm_dev *, struct ppa_addr, int,
				nvm_bb_update_fn *, void *);
typedef int (nvm_op_set_bb_fn)(struct nvm_dev *, struct nvm_rq *, int);
typedef int (nvm_submit_io_fn)(struct nvm_dev *, struct nvm_rq *);
typedef int (nvm_erase_blk_fn)(struct nvm_dev *, struct nvm_rq *);
typedef void *(nvm_create_dma_pool_fn)(struct nvm_dev *, char *);
typedef void (nvm_destroy_dma_pool_fn)(void *);
typedef void *(nvm_dev_dma_alloc_fn)(struct nvm_dev *, void *, gfp_t,
				dma_addr_t *);
typedef void (nvm_dev_dma_free_fn)(void *, void *, dma_addr_t);

struct nvm_dev_ops {
	nvm_id_fn		*identity;
	nvm_get_l2p_tbl_fn	*get_l2p_tbl;
	nvm_op_bb_tbl_fn	*get_bb_tbl;
	nvm_op_set_bb_fn	*set_bb_tbl;

	nvm_submit_io_fn	*submit_io;
	nvm_erase_blk_fn	*erase_block;

	nvm_create_dma_pool_fn	*create_dma_pool;
	nvm_destroy_dma_pool_fn	*destroy_dma_pool;
	nvm_dev_dma_alloc_fn	*dev_dma_alloc;
	nvm_dev_dma_free_fn	*dev_dma_free;

	unsigned int		max_phys_sect;
};
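
/*
 * Sketch of how a device driver wires these up. Driver names here are
 * hypothetical; the in-tree user is the NVMe driver's lightnvm glue:
 *
 *	static struct nvm_dev_ops mydrv_nvm_dev_ops = {
 *		.identity	  = mydrv_nvm_identity,
 *		.get_l2p_tbl	  = mydrv_nvm_get_l2p_tbl,
 *		.get_bb_tbl	  = mydrv_nvm_get_bb_tbl,
 *		.set_bb_tbl	  = mydrv_nvm_set_bb_tbl,
 *		.submit_io	  = mydrv_nvm_submit_io,
 *		.erase_block	  = mydrv_nvm_erase_block,
 *		.create_dma_pool  = mydrv_nvm_create_dma_pool,
 *		.destroy_dma_pool = mydrv_nvm_destroy_dma_pool,
 *		.dev_dma_alloc	  = mydrv_nvm_dma_alloc,
 *		.dev_dma_free	  = mydrv_nvm_dma_free,
 *		.max_phys_sect	  = 64,
 *	};
 *
 * followed by a call to nvm_register(q, disk_name, &mydrv_nvm_dev_ops),
 * declared further down in this header.
 */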

#ifdef CONFIG_NVM

#include <linux/blkdev.h>
#include <linux/file.h>
#include <linux/dmapool.h>
#include <uapi/linux/lightnvm.h>

enum {
	/* HW Responsibilities */
	NVM_RSP_L2P		= 1 << 0,
	NVM_RSP_ECC		= 1 << 1,

	/* Physical Addressing Mode */
	NVM_ADDRMODE_LINEAR	= 0,
	NVM_ADDRMODE_CHANNEL	= 1,

	/* Plane programming mode for LUN */
	NVM_PLANE_SINGLE	= 1,
	NVM_PLANE_DOUBLE	= 2,
	NVM_PLANE_QUAD		= 4,

	/* Status codes */
	NVM_RSP_SUCCESS		= 0x0,
	NVM_RSP_NOT_CHANGEABLE	= 0x1,
	NVM_RSP_ERR_FAILWRITE	= 0x40ff,
	NVM_RSP_ERR_EMPTYPAGE	= 0x42ff,

	/* Device opcodes */
	NVM_OP_HBREAD		= 0x02,
	NVM_OP_HBWRITE		= 0x81,
	NVM_OP_PWRITE		= 0x91,
	NVM_OP_PREAD		= 0x92,
	NVM_OP_ERASE		= 0x90,

	/* PPA Command Flags */
	NVM_IO_SNGL_ACCESS	= 0x0,
	NVM_IO_DUAL_ACCESS	= 0x1,
	NVM_IO_QUAD_ACCESS	= 0x2,

	/* NAND Access Modes */
	NVM_IO_SUSPEND		= 0x80,
	NVM_IO_SLC_MODE		= 0x100,
	NVM_IO_SCRAMBLE_DISABLE	= 0x200,

	/* Block Types */
	NVM_BLK_T_FREE		= 0x0,
	NVM_BLK_T_BAD		= 0x1,
	NVM_BLK_T_GRWN_BAD	= 0x2,
	NVM_BLK_T_DEV		= 0x4,
	NVM_BLK_T_HOST		= 0x8,

	/* Memory capabilities */
	NVM_ID_CAP_SLC		= 0x1,
	NVM_ID_CAP_CMD_SUSPEND	= 0x2,
	NVM_ID_CAP_SCRAMBLE	= 0x4,
	NVM_ID_CAP_ENCRYPT	= 0x8,

	/* Memory types */
	NVM_ID_FMTYPE_SLC	= 0,
	NVM_ID_FMTYPE_MLC	= 1,

	/* Device capabilities */
	NVM_ID_DCAP_BBLKMGMT	= 0x1,
	NVM_ID_DCAP_ECC		= 0x2,
};

struct nvm_id_lp_mlc {
	u16	num_pairs;
	u8	pairs[886];
};

struct nvm_id_lp_tbl {
	__u8	id[8];
	struct nvm_id_lp_mlc mlc;
};

struct nvm_id_group {
	u8	mtype;
	u8	fmtype;
	u8	num_ch;
	u8	num_lun;
	u8	num_pln;
	u16	num_blk;
	u16	num_pg;
	u16	fpg_sz;
	u16	csecs;
	u16	sos;
	u32	trdt;
	u32	trdm;
	u32	tprt;
	u32	tprm;
	u32	tbet;
	u32	tbem;
	u32	mpos;
	u32	mccap;
	u16	cpar;

	struct nvm_id_lp_tbl lptbl;
};

struct nvm_addr_format {
	u8	ch_offset;
	u8	ch_len;
	u8	lun_offset;
	u8	lun_len;
	u8	pln_offset;
	u8	pln_len;
	u8	blk_offset;
	u8	blk_len;
	u8	pg_offset;
	u8	pg_len;
	u8	sect_offset;
	u8	sect_len;
};

struct nvm_id {
	u8	ver_id;
	u8	vmnt;
	u8	cgrps;
	u32	cap;
	u32	dom;
	struct nvm_addr_format ppaf;
	struct nvm_id_group groups[4];
} __packed;

struct nvm_target {
	struct list_head list;
	struct nvm_tgt_type *type;
	struct gendisk *disk;
};

struct nvm_tgt_instance {
	struct nvm_tgt_type *tt;
};

#define ADDR_EMPTY (~0ULL)

#define NVM_VERSION_MAJOR 1
#define NVM_VERSION_MINOR 0
#define NVM_VERSION_PATCH 0

struct nvm_rq;
typedef void (nvm_end_io_fn)(struct nvm_rq *);

struct nvm_rq {
	struct nvm_tgt_instance *ins;
	struct nvm_dev *dev;

	struct bio *bio;

	union {
		struct ppa_addr ppa_addr;
		dma_addr_t dma_ppa_list;
	};

	struct ppa_addr *ppa_list;

	void *metadata;
	dma_addr_t dma_metadata;

	struct completion *wait;
	nvm_end_io_fn *end_io;

	uint8_t opcode;
	uint16_t nr_pages;
	uint16_t flags;

	u64 ppa_status; /* ppa media status */
	int error;
};

static inline struct nvm_rq *nvm_rq_from_pdu(void *pdu)
{
	return pdu - sizeof(struct nvm_rq);
}

static inline void *nvm_rq_to_pdu(struct nvm_rq *rqdata)
{
	return rqdata + 1;
}
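
/*
 * The two helpers above assume the per-target private data (the "pdu") is
 * allocated in the same buffer as the request, immediately after it:
 *
 *	+----------------+------------------+
 *	| struct nvm_rq  | target pdu ...   |
 *	+----------------+------------------+
 *
 * so "rqdata + 1" steps over the nvm_rq to reach the pdu, and subtracting
 * sizeof(struct nvm_rq) from the pdu pointer recovers the request. A sketch,
 * assuming the caller sizes the allocation accordingly:
 *
 *	struct nvm_rq *rqd = kzalloc(sizeof(*rqd) + pdu_size, GFP_KERNEL);
 *	struct my_pdu *pdu = nvm_rq_to_pdu(rqd);   // my_pdu is hypothetical
 */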

struct nvm_block;

struct nvm_lun {
	int id;

	int lun_id;
	int chnl_id;

	/* It is up to the target to mark blocks as closed. If the target does
	 * not do it, all blocks are marked as open, and nr_open_blocks
	 * represents the number of blocks in use.
	 */
	unsigned int nr_open_blocks;	/* Number of used, writable blocks */
	unsigned int nr_closed_blocks;	/* Number of used, read-only blocks */
	unsigned int nr_free_blocks;	/* Number of unused blocks */
	unsigned int nr_bad_blocks;	/* Number of bad blocks */

	spinlock_t lock;

	struct nvm_block *blocks;
};

enum {
	NVM_BLK_ST_FREE		= 0x1,	/* Free block */
	NVM_BLK_ST_OPEN		= 0x2,	/* Open block - read-write */
	NVM_BLK_ST_CLOSED	= 0x4,	/* Closed block - read-only */
	NVM_BLK_ST_BAD		= 0x8,	/* Bad block */
};

struct nvm_block {
	struct list_head list;
	struct nvm_lun *lun;
	unsigned long id;

	void *priv;
	int state;
};

/* system block cpu representation */
struct nvm_sb_info {
	unsigned long	seqnr;
	unsigned long	erase_cnt;
	unsigned int	version;
	char		mmtype[NVM_MMTYPE_LEN];
	struct ppa_addr	fs_ppa;
};

struct nvm_dev {
	struct nvm_dev_ops *ops;

	struct list_head devices;
	struct list_head online_targets;

	/* Media manager */
	struct nvmm_type *mt;
	void *mp;

	/* System blocks */
	struct nvm_sb_info sb;

	/* Device information */
	int nr_chnls;
	int nr_planes;
	int luns_per_chnl;
	int sec_per_pg; /* only sectors for a single page */
	int pgs_per_blk;
	int blks_per_lun;
	int fpg_size;
	int pfpg_size; /* size of buffer if all pages are to be read */
	int sec_size;
	int oob_size;
	int mccap;
	struct nvm_addr_format ppaf;

	/* Calculated/Cached values. These do not reflect the actual usable
	 * blocks at run-time.
	 */
	int max_rq_size;
	int plane_mode; /* drive device in single, double or quad mode */

	int sec_per_pl; /* all sectors across planes */
	int sec_per_blk;
	int sec_per_lun;

	/* lower page table */
	int lps_per_blk;
	int *lptbl;

	unsigned long total_blocks;
	unsigned long total_secs;
	int nr_luns;
	unsigned max_pages_per_blk;

	unsigned long *lun_map;
	void *ppalist_pool;

	struct nvm_id identity;

	/* Backend device */
	struct request_queue *q;
	char name[DISK_NAME_LEN];

	struct mutex mlock;
	spinlock_t lock;
};
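
/*
 * The "calculated" fields above follow from the identify geometry. A sketch
 * of the relationships, assuming the core derives them this way at init
 * time:
 *
 *	sec_per_pl  = sec_per_pg  * nr_planes;
 *	sec_per_blk = sec_per_pl  * pgs_per_blk;
 *	sec_per_lun = sec_per_blk * blks_per_lun;
 *	total_secs  = sec_per_lun * nr_luns;
 */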

static inline struct ppa_addr generic_to_dev_addr(struct nvm_dev *dev,
						struct ppa_addr r)
{
	struct ppa_addr l;

	l.ppa = ((u64)r.g.blk) << dev->ppaf.blk_offset;
	l.ppa |= ((u64)r.g.pg) << dev->ppaf.pg_offset;
	l.ppa |= ((u64)r.g.sec) << dev->ppaf.sect_offset;
	l.ppa |= ((u64)r.g.pl) << dev->ppaf.pln_offset;
	l.ppa |= ((u64)r.g.lun) << dev->ppaf.lun_offset;
	l.ppa |= ((u64)r.g.ch) << dev->ppaf.ch_offset;

	return l;
}

static inline struct ppa_addr dev_to_generic_addr(struct nvm_dev *dev,
						struct ppa_addr r)
{
	struct ppa_addr l;

	/*
	 * (r.ppa >> X offset) & X len bitmask. X eq. blk, pg, etc.
	 */
	l.ppa = 0; /* clear the address before filling individual fields */
	l.g.blk = (r.ppa >> dev->ppaf.blk_offset) &
					(((1 << dev->ppaf.blk_len) - 1));
	l.g.pg = (r.ppa >> dev->ppaf.pg_offset) &
					(((1 << dev->ppaf.pg_len) - 1));
	l.g.sec = (r.ppa >> dev->ppaf.sect_offset) &
					(((1 << dev->ppaf.sect_len) - 1));
	l.g.pl = (r.ppa >> dev->ppaf.pln_offset) &
					(((1 << dev->ppaf.pln_len) - 1));
	l.g.lun = (r.ppa >> dev->ppaf.lun_offset) &
					(((1 << dev->ppaf.lun_len) - 1));
	l.g.ch = (r.ppa >> dev->ppaf.ch_offset) &
					(((1 << dev->ppaf.ch_len) - 1));

	return l;
}
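
/*
 * The two helpers are inverses of each other: generic_to_dev_addr() packs
 * the generic bitfields into the device's native layout (described by
 * dev->ppaf), and dev_to_generic_addr() unpacks it again. An illustrative
 * round trip:
 *
 *	struct ppa_addr dev_ppa = generic_to_dev_addr(dev, gen_ppa);
 *	struct ppa_addr back = dev_to_generic_addr(dev, dev_ppa);
 *	// back.ppa == gen_ppa.ppa, provided each field fits within its
 *	// device-reported length
 */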

static inline int ppa_empty(struct ppa_addr ppa_addr)
{
	return (ppa_addr.ppa == ADDR_EMPTY);
}

static inline void ppa_set_empty(struct ppa_addr *ppa_addr)
{
	ppa_addr->ppa = ADDR_EMPTY;
}

static inline struct ppa_addr block_to_ppa(struct nvm_dev *dev,
						struct nvm_block *blk)
{
	struct ppa_addr ppa;
	struct nvm_lun *lun = blk->lun;

	ppa.ppa = 0;
	ppa.g.blk = blk->id % dev->blks_per_lun;
	ppa.g.lun = lun->lun_id;
	ppa.g.ch = lun->chnl_id;

	return ppa;
}
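
/*
 * block_to_ppa() maps a media manager's flat block id back to a generic
 * address: ids are handed out LUN by LUN, so the id modulo blks_per_lun
 * recovers the in-LUN block number. E.g. with blks_per_lun = 1024,
 * blk->id = 2060 on lun_id 2 yields g.blk = 12.
 */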

static inline int ppa_to_slc(struct nvm_dev *dev, int slc_pg)
{
	return dev->lptbl[slc_pg];
}

typedef blk_qc_t (nvm_tgt_make_rq_fn)(struct request_queue *, struct bio *);
typedef sector_t (nvm_tgt_capacity_fn)(void *);
typedef void *(nvm_tgt_init_fn)(struct nvm_dev *, struct gendisk *, int, int);
typedef void (nvm_tgt_exit_fn)(void *);

struct nvm_tgt_type {
	const char *name;
	unsigned int version[3];

	/* target entry points */
	nvm_tgt_make_rq_fn *make_rq;
	nvm_tgt_capacity_fn *capacity;
	nvm_end_io_fn *end_io;

	/* module-specific init/teardown */
	nvm_tgt_init_fn *init;
	nvm_tgt_exit_fn *exit;

	/* For internal use */
	struct list_head list;
};
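
/*
 * A target (e.g. the in-tree rrpc block-device target) fills in one of
 * these and registers it. A minimal sketch with hypothetical names:
 *
 *	static struct nvm_tgt_type tt_mytgt = {
 *		.name		= "mytgt",
 *		.version	= {1, 0, 0},
 *		.make_rq	= mytgt_make_rq,
 *		.capacity	= mytgt_capacity,
 *		.end_io		= mytgt_end_io,
 *		.init		= mytgt_init,
 *		.exit		= mytgt_exit,
 *	};
 *
 * Module init/exit then call nvm_register_target(&tt_mytgt) and
 * nvm_unregister_target(&tt_mytgt), declared below.
 */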

extern int nvm_register_target(struct nvm_tgt_type *);
extern void nvm_unregister_target(struct nvm_tgt_type *);

extern void *nvm_dev_dma_alloc(struct nvm_dev *, gfp_t, dma_addr_t *);
extern void nvm_dev_dma_free(struct nvm_dev *, void *, dma_addr_t);

typedef int (nvmm_register_fn)(struct nvm_dev *);
typedef void (nvmm_unregister_fn)(struct nvm_dev *);
typedef struct nvm_block *(nvmm_get_blk_fn)(struct nvm_dev *,
					struct nvm_lun *, unsigned long);
typedef void (nvmm_put_blk_fn)(struct nvm_dev *, struct nvm_block *);
typedef int (nvmm_open_blk_fn)(struct nvm_dev *, struct nvm_block *);
typedef int (nvmm_close_blk_fn)(struct nvm_dev *, struct nvm_block *);
typedef void (nvmm_flush_blk_fn)(struct nvm_dev *, struct nvm_block *);
typedef int (nvmm_submit_io_fn)(struct nvm_dev *, struct nvm_rq *);
typedef int (nvmm_erase_blk_fn)(struct nvm_dev *, struct nvm_block *,
					unsigned long);
typedef struct nvm_lun *(nvmm_get_lun_fn)(struct nvm_dev *, int);
typedef int (nvmm_reserve_lun)(struct nvm_dev *, int);
typedef void (nvmm_release_lun)(struct nvm_dev *, int);
typedef void (nvmm_lun_info_print_fn)(struct nvm_dev *);

typedef int (nvmm_get_area_fn)(struct nvm_dev *, sector_t *, sector_t);
typedef void (nvmm_put_area_fn)(struct nvm_dev *, sector_t);

struct nvmm_type {
	const char *name;
	unsigned int version[3];

	nvmm_register_fn *register_mgr;
	nvmm_unregister_fn *unregister_mgr;

	/* Block administration callbacks */
	nvmm_get_blk_fn *get_blk_unlocked;
	nvmm_put_blk_fn *put_blk_unlocked;
	nvmm_get_blk_fn *get_blk;
	nvmm_put_blk_fn *put_blk;
	nvmm_open_blk_fn *open_blk;
	nvmm_close_blk_fn *close_blk;
	nvmm_flush_blk_fn *flush_blk;

	nvmm_submit_io_fn *submit_io;
	nvmm_erase_blk_fn *erase_blk;

	/* Configuration management */
	nvmm_get_lun_fn *get_lun;
	nvmm_reserve_lun *reserve_lun;
	nvmm_release_lun *release_lun;

	/* Statistics */
	nvmm_lun_info_print_fn *lun_info_print;

	nvmm_get_area_fn *get_area;
	nvmm_put_area_fn *put_area;

	struct list_head list;
};

extern int nvm_register_mgr(struct nvmm_type *);
extern void nvm_unregister_mgr(struct nvmm_type *);

extern struct nvm_block *nvm_get_blk_unlocked(struct nvm_dev *,
					struct nvm_lun *, unsigned long);
extern void nvm_put_blk_unlocked(struct nvm_dev *, struct nvm_block *);

extern struct nvm_block *nvm_get_blk(struct nvm_dev *, struct nvm_lun *,
					unsigned long);
extern void nvm_put_blk(struct nvm_dev *, struct nvm_block *);

extern int nvm_register(struct request_queue *, char *,
					struct nvm_dev_ops *);
extern void nvm_unregister(char *);

extern int nvm_submit_io(struct nvm_dev *, struct nvm_rq *);
extern void nvm_generic_to_addr_mode(struct nvm_dev *, struct nvm_rq *);
extern void nvm_addr_to_generic_mode(struct nvm_dev *, struct nvm_rq *);
extern int nvm_set_rqd_ppalist(struct nvm_dev *, struct nvm_rq *,
					struct ppa_addr *, int);
extern void nvm_free_rqd_ppalist(struct nvm_dev *, struct nvm_rq *);
extern int nvm_erase_ppa(struct nvm_dev *, struct ppa_addr *, int);
extern int nvm_erase_blk(struct nvm_dev *, struct nvm_block *);
extern void nvm_end_io(struct nvm_rq *, int);
extern int nvm_submit_ppa(struct nvm_dev *, struct ppa_addr *, int, int, int,
					void *, int);
extern int nvm_submit_ppa_list(struct nvm_dev *, struct ppa_addr *, int, int,
					int, void *, int);

/* sysblk.c */
#define NVM_SYSBLK_MAGIC 0x4E564D53 /* "NVMS" */

/* system block on disk representation */
struct nvm_system_block {
	__be32 magic;		/* magic signature */
	__be32 seqnr;		/* sequence number */
	__be32 erase_cnt;	/* erase count */
	__be16 version;		/* version number */
	u8 mmtype[NVM_MMTYPE_LEN]; /* media manager name */
	__be64 fs_ppa;		/* PPA for media manager superblock */
};
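
/*
 * This is the big-endian on-disk counterpart of the cpu-side
 * struct nvm_sb_info above. A sketch of the conversion when writing the
 * block out (the helper name is assumed, not the core's actual function):
 *
 *	static void nvm_cpu_to_sysblk(struct nvm_system_block *sb,
 *				      struct nvm_sb_info *info)
 *	{
 *		sb->magic = cpu_to_be32(NVM_SYSBLK_MAGIC);
 *		sb->seqnr = cpu_to_be32(info->seqnr);
 *		sb->erase_cnt = cpu_to_be32(info->erase_cnt);
 *		sb->version = cpu_to_be16(info->version);
 *		memcpy(sb->mmtype, info->mmtype, NVM_MMTYPE_LEN);
 *		sb->fs_ppa = cpu_to_be64(info->fs_ppa.ppa);
 *	}
 */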

extern int nvm_get_sysblock(struct nvm_dev *, struct nvm_sb_info *);
extern int nvm_update_sysblock(struct nvm_dev *, struct nvm_sb_info *);
extern int nvm_init_sysblock(struct nvm_dev *, struct nvm_sb_info *);

extern int nvm_dev_factory(struct nvm_dev *, int flags);
#else /* CONFIG_NVM */
struct nvm_dev_ops;

static inline int nvm_register(struct request_queue *q, char *disk_name,
						struct nvm_dev_ops *ops)
{
	return -EINVAL;
}
static inline void nvm_unregister(char *disk_name) {}
#endif /* CONFIG_NVM */
#endif /* NVM_H */