#ifndef NVM_H
#define NVM_H

enum {
	NVM_IO_OK = 0,
	NVM_IO_REQUEUE = 1,
	NVM_IO_DONE = 2,
	NVM_IO_ERR = 3,

	NVM_IOTYPE_NONE = 0,
	NVM_IOTYPE_GC = 1,
};

#ifdef CONFIG_NVM

#include <linux/blkdev.h>
#include <linux/types.h>
#include <linux/file.h>
#include <linux/dmapool.h>

enum {
	/* HW Responsibilities */
	NVM_RSP_L2P = 1 << 0,
	NVM_RSP_ECC = 1 << 1,

	/* Physical Addressing Mode */
	NVM_ADDRMODE_LINEAR = 0,
	NVM_ADDRMODE_CHANNEL = 1,

	/* Plane programming mode for LUN */
	NVM_PLANE_SINGLE = 0,
	NVM_PLANE_DOUBLE = 1,
	NVM_PLANE_QUAD = 2,

	/* Status codes */
	NVM_RSP_SUCCESS = 0x0,
	NVM_RSP_NOT_CHANGEABLE = 0x1,
	NVM_RSP_ERR_FAILWRITE = 0x40ff,
	NVM_RSP_ERR_EMPTYPAGE = 0x42ff,

	/* Device opcodes */
	NVM_OP_HBREAD = 0x02,
	NVM_OP_HBWRITE = 0x81,
	NVM_OP_PWRITE = 0x91,
	NVM_OP_PREAD = 0x92,
	NVM_OP_ERASE = 0x90,

	/* PPA Command Flags */
	NVM_IO_SNGL_ACCESS = 0x0,
	NVM_IO_DUAL_ACCESS = 0x1,
	NVM_IO_QUAD_ACCESS = 0x2,

	/* NAND Access Modes */
	NVM_IO_SUSPEND = 0x80,
	NVM_IO_SLC_MODE = 0x100,
	NVM_IO_SCRAMBLE_DISABLE = 0x200,

	/* Block Types */
	NVM_BLK_T_FREE = 0x0,
	NVM_BLK_T_BAD = 0x1,
	NVM_BLK_T_DEV = 0x2,
	NVM_BLK_T_HOST = 0x4,
};
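
/*
 * Illustrative only: the PPA command flags and NAND access modes above are
 * intended to be combined into a request's flags field. For example, a
 * request that stripes across all four planes and disables scrambling might
 * carry:
 *
 *	flags = NVM_IO_QUAD_ACCESS | NVM_IO_SCRAMBLE_DISABLE;
 */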

struct nvm_id_group {
	u8 mtype;
	u8 fmtype;
	u8 num_ch;
	u8 num_lun;
	u8 num_pln;
	u16 num_blk;
	u16 num_pg;
	u16 fpg_sz;
	u16 csecs;
	u16 sos;
	u32 trdt;
	u32 trdm;
	u32 tprt;
	u32 tprm;
	u32 tbet;
	u32 tbem;
	u32 mpos;
	u32 mccap;
	u16 cpar;
};

struct nvm_addr_format {
	u8 ch_offset;
	u8 ch_len;
	u8 lun_offset;
	u8 lun_len;
	u8 pln_offset;
	u8 pln_len;
	u8 blk_offset;
	u8 blk_len;
	u8 pg_offset;
	u8 pg_len;
	u8 sect_offset;
	u8 sect_len;
};

struct nvm_id {
	u8 ver_id;
	u8 vmnt;
	u8 cgrps;
	u32 cap;
	u32 dom;
	struct nvm_addr_format ppaf;
	struct nvm_id_group groups[4];
} __packed;

struct nvm_target {
	struct list_head list;
	struct nvm_tgt_type *type;
	struct gendisk *disk;
};

struct nvm_tgt_instance {
	struct nvm_tgt_type *tt;
};

#define ADDR_EMPTY (~0ULL)

#define NVM_VERSION_MAJOR 1
#define NVM_VERSION_MINOR 0
#define NVM_VERSION_PATCH 0

#define NVM_BLK_BITS (16)
#define NVM_PG_BITS  (16)
#define NVM_SEC_BITS (8)
#define NVM_PL_BITS  (8)
#define NVM_LUN_BITS (8)
#define NVM_CH_BITS  (8)

struct ppa_addr {
	/* Generic structure for all addresses */
	union {
		struct {
			u64 blk : NVM_BLK_BITS;
			u64 pg  : NVM_PG_BITS;
			u64 sec : NVM_SEC_BITS;
			u64 pl  : NVM_PL_BITS;
			u64 lun : NVM_LUN_BITS;
			u64 ch  : NVM_CH_BITS;
		} g;

		u64 ppa;
	};
};
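
/*
 * Usage sketch (illustrative, not part of the API): the union lets a caller
 * fill in an address field by field through the generic layout and still
 * treat it as a single 64-bit value afterwards:
 *
 *	struct ppa_addr p;
 *
 *	p.ppa = 0;
 *	p.g.ch = 0;
 *	p.g.lun = 2;
 *	p.g.blk = 511;
 *	p.g.pg = 7;
 *	p.g.sec = 3;
 *
 * After the assignments, p.ppa holds all six fields packed into one u64 in
 * the generic (device-independent) layout.
 */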

struct nvm_rq;
typedef void (nvm_end_io_fn)(struct nvm_rq *);

struct nvm_rq {
	struct nvm_tgt_instance *ins;
	struct nvm_dev *dev;

	struct bio *bio;

	union {
		struct ppa_addr ppa_addr;
		dma_addr_t dma_ppa_list;
	};

	struct ppa_addr *ppa_list;

	void *metadata;
	dma_addr_t dma_metadata;

	struct completion *wait;
	nvm_end_io_fn *end_io;

	uint8_t opcode;
	uint16_t nr_pages;
	uint16_t flags;

	int error;
};

static inline struct nvm_rq *nvm_rq_from_pdu(void *pdu)
{
	return pdu - sizeof(struct nvm_rq);
}

static inline void *nvm_rq_to_pdu(struct nvm_rq *rqdata)
{
	return rqdata + 1;
}
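
/*
 * Sketch of the intended allocation layout (illustrative; struct example_rq
 * is hypothetical): a target places its per-request data immediately after
 * the nvm_rq, so the two helpers above can convert between the shared
 * request and the target-private PDU with plain pointer arithmetic:
 *
 *	struct example_rq {
 *		int state;
 *	};
 *
 *	size_t size = sizeof(struct nvm_rq) + sizeof(struct example_rq);
 *	struct nvm_rq *rqd = kzalloc(size, GFP_KERNEL);
 *	struct example_rq *erq = nvm_rq_to_pdu(rqd);
 *
 * With this layout, nvm_rq_from_pdu(erq) returns the original rqd.
 */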

struct nvm_block;

typedef int (nvm_l2p_update_fn)(u64, u32, __le64 *, void *);
typedef int (nvm_bb_update_fn)(struct ppa_addr, int, u8 *, void *);
typedef int (nvm_id_fn)(struct nvm_dev *, struct nvm_id *);
typedef int (nvm_get_l2p_tbl_fn)(struct nvm_dev *, u64, u32,
				nvm_l2p_update_fn *, void *);
typedef int (nvm_op_bb_tbl_fn)(struct nvm_dev *, struct ppa_addr, int,
				nvm_bb_update_fn *, void *);
typedef int (nvm_op_set_bb_fn)(struct nvm_dev *, struct nvm_rq *, int);
typedef int (nvm_submit_io_fn)(struct nvm_dev *, struct nvm_rq *);
typedef int (nvm_erase_blk_fn)(struct nvm_dev *, struct nvm_rq *);
typedef void *(nvm_create_dma_pool_fn)(struct nvm_dev *, char *);
typedef void (nvm_destroy_dma_pool_fn)(void *);
typedef void *(nvm_dev_dma_alloc_fn)(struct nvm_dev *, void *, gfp_t,
				dma_addr_t *);
typedef void (nvm_dev_dma_free_fn)(void *, void *, dma_addr_t);

struct nvm_dev_ops {
	nvm_id_fn *identity;
	nvm_get_l2p_tbl_fn *get_l2p_tbl;
	nvm_op_bb_tbl_fn *get_bb_tbl;
	nvm_op_set_bb_fn *set_bb_tbl;

	nvm_submit_io_fn *submit_io;
	nvm_erase_blk_fn *erase_block;

	nvm_create_dma_pool_fn *create_dma_pool;
	nvm_destroy_dma_pool_fn *destroy_dma_pool;
	nvm_dev_dma_alloc_fn *dev_dma_alloc;
	nvm_dev_dma_free_fn *dev_dma_free;

	unsigned int max_phys_sect;
};
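
/*
 * Sketch of how a device driver is expected to hook in (illustrative; the
 * "example_" names are hypothetical): the driver fills in an nvm_dev_ops
 * table with its callbacks and registers its request queue under a disk
 * name via nvm_register(), declared further down in this header:
 *
 *	static struct nvm_dev_ops example_dev_ops = {
 *		.identity		= example_identity,
 *		.get_l2p_tbl		= example_get_l2p_tbl,
 *		.get_bb_tbl		= example_get_bb_tbl,
 *		.set_bb_tbl		= example_set_bb_tbl,
 *		.submit_io		= example_submit_io,
 *		.erase_block		= example_erase_block,
 *		.create_dma_pool	= example_create_dma_pool,
 *		.destroy_dma_pool	= example_destroy_dma_pool,
 *		.dev_dma_alloc		= example_dma_alloc,
 *		.dev_dma_free		= example_dma_free,
 *		.max_phys_sect		= 64,
 *	};
 *
 *	ret = nvm_register(q, disk_name, &example_dev_ops);
 */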

struct nvm_lun {
	int id;

	int lun_id;
	int chnl_id;

	unsigned int nr_inuse_blocks;	/* Number of used blocks */
	unsigned int nr_free_blocks;	/* Number of unused blocks */
	unsigned int nr_bad_blocks;	/* Number of bad blocks */
	struct nvm_block *blocks;

	spinlock_t lock;
};

struct nvm_block {
	struct list_head list;
	struct nvm_lun *lun;
	unsigned long id;

	void *priv;
	int type;
};

struct nvm_dev {
	struct nvm_dev_ops *ops;

	struct list_head devices;
	struct list_head online_targets;

	/* Media manager */
	struct nvmm_type *mt;
	void *mp;

	/* Device information */
	int nr_chnls;
	int nr_planes;
	int luns_per_chnl;
	int sec_per_pg; /* only sectors for a single page */
	int pgs_per_blk;
	int blks_per_lun;
	int sec_size;
	int oob_size;
	struct nvm_addr_format ppaf;

	/* Calculated/Cached values. These do not reflect the actual usable
	 * blocks at run-time.
	 */
	int max_rq_size;
	int plane_mode; /* drive device in single, double or quad mode */

	int sec_per_pl; /* all sectors across planes */
	int sec_per_blk;
	int sec_per_lun;

	unsigned long total_pages;
	unsigned long total_blocks;
	int nr_luns;
	unsigned max_pages_per_blk;

	void *ppalist_pool;

	struct nvm_id identity;

	/* Backend device */
	struct request_queue *q;
	char name[DISK_NAME_LEN];
};

static inline struct ppa_addr generic_to_dev_addr(struct nvm_dev *dev,
						struct ppa_addr r)
{
	struct ppa_addr l;

	l.ppa = ((u64)r.g.blk) << dev->ppaf.blk_offset;
	l.ppa |= ((u64)r.g.pg) << dev->ppaf.pg_offset;
	l.ppa |= ((u64)r.g.sec) << dev->ppaf.sect_offset;
	l.ppa |= ((u64)r.g.pl) << dev->ppaf.pln_offset;
	l.ppa |= ((u64)r.g.lun) << dev->ppaf.lun_offset;
	l.ppa |= ((u64)r.g.ch) << dev->ppaf.ch_offset;

	return l;
}

static inline struct ppa_addr dev_to_generic_addr(struct nvm_dev *dev,
						struct ppa_addr r)
{
	struct ppa_addr l;

	/*
	 * (r.ppa >> X offset) & X len bitmask. X eq. blk, pg, etc.
	 */
	l.g.blk = (r.ppa >> dev->ppaf.blk_offset) &
					(((1 << dev->ppaf.blk_len) - 1));
	l.g.pg = (r.ppa >> dev->ppaf.pg_offset) &
					(((1 << dev->ppaf.pg_len) - 1));
	l.g.sec = (r.ppa >> dev->ppaf.sect_offset) &
					(((1 << dev->ppaf.sect_len) - 1));
	l.g.pl = (r.ppa >> dev->ppaf.pln_offset) &
					(((1 << dev->ppaf.pln_len) - 1));
	l.g.lun = (r.ppa >> dev->ppaf.lun_offset) &
					(((1 << dev->ppaf.lun_len) - 1));
	l.g.ch = (r.ppa >> dev->ppaf.ch_offset) &
					(((1 << dev->ppaf.ch_len) - 1));

	return l;
}
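
/*
 * Worked example (hypothetical address format, for illustration only):
 * with ppaf = { ch_offset = 56, lun_offset = 48, pln_offset = 40,
 * blk_offset = 24, pg_offset = 8, sect_offset = 0 } and a generic address
 * of ch = 1, lun = 2, blk = 100, pg = 5, sec = 3, generic_to_dev_addr()
 * produces:
 *
 *	l.ppa = (1ULL << 56) | (2ULL << 48) | (100ULL << 24)
 *					| (5ULL << 8) | 3;
 *
 * dev_to_generic_addr() reverses the packing by shifting each field back
 * down and masking it with (1 << ppaf.X_len) - 1.
 */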

static inline int ppa_empty(struct ppa_addr ppa_addr)
{
	return (ppa_addr.ppa == ADDR_EMPTY);
}

static inline void ppa_set_empty(struct ppa_addr *ppa_addr)
{
	ppa_addr->ppa = ADDR_EMPTY;
}

static inline struct ppa_addr block_to_ppa(struct nvm_dev *dev,
						struct nvm_block *blk)
{
	struct ppa_addr ppa;
	struct nvm_lun *lun = blk->lun;

	ppa.ppa = 0;
	ppa.g.blk = blk->id % dev->blks_per_lun;
	ppa.g.lun = lun->lun_id;
	ppa.g.ch = lun->chnl_id;

	return ppa;
}
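
/*
 * Example (illustrative values): with dev->blks_per_lun = 1024, a block
 * whose global id is 2563 and whose LUN has lun_id = 2, chnl_id = 1 maps
 * to ppa.g.blk = 2563 % 1024 = 515, ppa.g.lun = 2 and ppa.g.ch = 1; page,
 * plane and sector are left at zero.
 */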

typedef blk_qc_t (nvm_tgt_make_rq_fn)(struct request_queue *, struct bio *);
typedef sector_t (nvm_tgt_capacity_fn)(void *);
typedef void *(nvm_tgt_init_fn)(struct nvm_dev *, struct gendisk *, int, int);
typedef void (nvm_tgt_exit_fn)(void *);

struct nvm_tgt_type {
	const char *name;
	unsigned int version[3];

	/* target entry points */
	nvm_tgt_make_rq_fn *make_rq;
	nvm_tgt_capacity_fn *capacity;
	nvm_end_io_fn *end_io;

	/* module-specific init/teardown */
	nvm_tgt_init_fn *init;
	nvm_tgt_exit_fn *exit;

	/* For internal use */
	struct list_head list;
};

extern int nvm_register_target(struct nvm_tgt_type *);
extern void nvm_unregister_target(struct nvm_tgt_type *);
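
/*
 * Registration sketch (illustrative; the "example_" identifiers are
 * hypothetical): a target module describes itself with an nvm_tgt_type and
 * registers it at module load time:
 *
 *	static struct nvm_tgt_type tt_example = {
 *		.name		= "example",
 *		.version	= {1, 0, 0},
 *		.make_rq	= example_make_rq,
 *		.capacity	= example_capacity,
 *		.end_io		= example_end_io,
 *		.init		= example_init,
 *		.exit		= example_exit,
 *	};
 *
 *	module_init  ->  nvm_register_target(&tt_example);
 *	module_exit  ->  nvm_unregister_target(&tt_example);
 */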

extern void *nvm_dev_dma_alloc(struct nvm_dev *, gfp_t, dma_addr_t *);
extern void nvm_dev_dma_free(struct nvm_dev *, void *, dma_addr_t);

typedef int (nvmm_register_fn)(struct nvm_dev *);
typedef void (nvmm_unregister_fn)(struct nvm_dev *);
typedef struct nvm_block *(nvmm_get_blk_fn)(struct nvm_dev *,
					struct nvm_lun *, unsigned long);
typedef void (nvmm_put_blk_fn)(struct nvm_dev *, struct nvm_block *);
typedef int (nvmm_open_blk_fn)(struct nvm_dev *, struct nvm_block *);
typedef int (nvmm_close_blk_fn)(struct nvm_dev *, struct nvm_block *);
typedef void (nvmm_flush_blk_fn)(struct nvm_dev *, struct nvm_block *);
typedef int (nvmm_submit_io_fn)(struct nvm_dev *, struct nvm_rq *);
typedef int (nvmm_erase_blk_fn)(struct nvm_dev *, struct nvm_block *,
					unsigned long);
typedef struct nvm_lun *(nvmm_get_lun_fn)(struct nvm_dev *, int);
typedef void (nvmm_lun_info_print_fn)(struct nvm_dev *);

struct nvmm_type {
	const char *name;
	unsigned int version[3];

	nvmm_register_fn *register_mgr;
	nvmm_unregister_fn *unregister_mgr;

	/* Block administration callbacks */
	nvmm_get_blk_fn *get_blk;
	nvmm_put_blk_fn *put_blk;
	nvmm_open_blk_fn *open_blk;
	nvmm_close_blk_fn *close_blk;
	nvmm_flush_blk_fn *flush_blk;

	nvmm_submit_io_fn *submit_io;
	nvmm_erase_blk_fn *erase_blk;

	/* Configuration management */
	nvmm_get_lun_fn *get_lun;

	/* Statistics */
	nvmm_lun_info_print_fn *lun_info_print;
	struct list_head list;
};

extern int nvm_register_mgr(struct nvmm_type *);
extern void nvm_unregister_mgr(struct nvmm_type *);

extern struct nvm_block *nvm_get_blk(struct nvm_dev *, struct nvm_lun *,
					unsigned long);
extern void nvm_put_blk(struct nvm_dev *, struct nvm_block *);

extern int nvm_register(struct request_queue *, char *,
					struct nvm_dev_ops *);
extern void nvm_unregister(char *);

extern int nvm_submit_io(struct nvm_dev *, struct nvm_rq *);
extern void nvm_generic_to_addr_mode(struct nvm_dev *, struct nvm_rq *);
extern void nvm_addr_to_generic_mode(struct nvm_dev *, struct nvm_rq *);
extern int nvm_set_rqd_ppalist(struct nvm_dev *, struct nvm_rq *,
					struct ppa_addr *, int);
extern void nvm_free_rqd_ppalist(struct nvm_dev *, struct nvm_rq *);
extern int nvm_erase_ppa(struct nvm_dev *, struct ppa_addr *, int);
extern int nvm_erase_blk(struct nvm_dev *, struct nvm_block *);
extern void nvm_end_io(struct nvm_rq *, int);
extern int nvm_submit_ppa(struct nvm_dev *, struct ppa_addr *, int, int, int,
					void *, int);
#else /* CONFIG_NVM */
struct nvm_dev_ops;

static inline int nvm_register(struct request_queue *q, char *disk_name,
						struct nvm_dev_ops *ops)
{
	return -EINVAL;
}
static inline void nvm_unregister(char *disk_name) {}
#endif /* CONFIG_NVM */
#endif /* NVM_H */