#ifndef NVM_H
#define NVM_H

#include <linux/types.h>

enum {
	NVM_IO_OK = 0,
	NVM_IO_REQUEUE = 1,
	NVM_IO_DONE = 2,
	NVM_IO_ERR = 3,

	NVM_IOTYPE_NONE = 0,
	NVM_IOTYPE_GC = 1,
};

#define NVM_BLK_BITS (16)
#define NVM_PG_BITS (16)
#define NVM_SEC_BITS (8)
#define NVM_PL_BITS (8)
#define NVM_LUN_BITS (8)
#define NVM_CH_BITS (7)

struct ppa_addr {
	/* Generic structure for all addresses */
	union {
		struct {
			u64 blk : NVM_BLK_BITS;
			u64 pg : NVM_PG_BITS;
			u64 sec : NVM_SEC_BITS;
			u64 pl : NVM_PL_BITS;
			u64 lun : NVM_LUN_BITS;
			u64 ch : NVM_CH_BITS;
			u64 reserved : 1;
		} g;

		struct {
			u64 line : 63;
			u64 is_cached : 1;
		} c;

		u64 ppa;
	};
};
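
/*
 * Illustrative sketch: the union above lets one 64-bit value be viewed
 * as the generic ch/lun/pl/blk/pg/sec tuple (g), as a cached line
 * address (c), or as the raw u64 (ppa). nvm_example_mk_ppa() is a
 * hypothetical helper, not part of the kernel API, showing the usual
 * way a generic address is composed.
 */
static inline struct ppa_addr nvm_example_mk_ppa(int ch, int lun, int blk)
{
	struct ppa_addr ppa;

	ppa.ppa = 0;	/* zero the whole union before setting fields */
	ppa.g.ch = ch;
	ppa.g.lun = lun;
	ppa.g.blk = blk;

	return ppa;
}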

struct nvm_rq;
struct nvm_id;
struct nvm_dev;

typedef int (nvm_l2p_update_fn)(u64, u32, __le64 *, void *);
typedef int (nvm_id_fn)(struct nvm_dev *, struct nvm_id *);
typedef int (nvm_get_l2p_tbl_fn)(struct nvm_dev *, u64, u32,
				nvm_l2p_update_fn *, void *);
typedef int (nvm_op_bb_tbl_fn)(struct nvm_dev *, struct ppa_addr, u8 *);
typedef int (nvm_op_set_bb_fn)(struct nvm_dev *, struct ppa_addr *, int, int);
typedef int (nvm_submit_io_fn)(struct nvm_dev *, struct nvm_rq *);
typedef int (nvm_erase_blk_fn)(struct nvm_dev *, struct nvm_rq *);
typedef void *(nvm_create_dma_pool_fn)(struct nvm_dev *, char *);
typedef void (nvm_destroy_dma_pool_fn)(void *);
typedef void *(nvm_dev_dma_alloc_fn)(struct nvm_dev *, void *, gfp_t,
								dma_addr_t *);
typedef void (nvm_dev_dma_free_fn)(void *, void *, dma_addr_t);

struct nvm_dev_ops {
	nvm_id_fn *identity;
	nvm_get_l2p_tbl_fn *get_l2p_tbl;
	nvm_op_bb_tbl_fn *get_bb_tbl;
	nvm_op_set_bb_fn *set_bb_tbl;

	nvm_submit_io_fn *submit_io;
	nvm_erase_blk_fn *erase_block;

	nvm_create_dma_pool_fn *create_dma_pool;
	nvm_destroy_dma_pool_fn *destroy_dma_pool;
	nvm_dev_dma_alloc_fn *dev_dma_alloc;
	nvm_dev_dma_free_fn *dev_dma_free;

	unsigned int max_phys_sect;
};
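
/*
 * Illustrative sketch: a device driver exposes itself to LightNVM by
 * filling in an nvm_dev_ops and handing it to nvm_register(). The
 * my_drv_* callbacks are hypothetical stand-ins for a real driver's
 * implementations:
 *
 *	static struct nvm_dev_ops my_drv_nvm_dev_ops = {
 *		.identity		= my_drv_nvm_identity,
 *		.get_l2p_tbl		= my_drv_nvm_get_l2p_tbl,
 *		.get_bb_tbl		= my_drv_nvm_get_bb_tbl,
 *		.set_bb_tbl		= my_drv_nvm_set_bb_tbl,
 *		.submit_io		= my_drv_nvm_submit_io,
 *		.erase_block		= my_drv_nvm_erase_block,
 *		.create_dma_pool	= my_drv_nvm_create_dma_pool,
 *		.destroy_dma_pool	= my_drv_nvm_destroy_dma_pool,
 *		.dev_dma_alloc		= my_drv_nvm_dev_dma_alloc,
 *		.dev_dma_free		= my_drv_nvm_dev_dma_free,
 *		.max_phys_sect		= 64,
 *	};
 */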


#ifdef CONFIG_NVM

#include <linux/blkdev.h>
#include <linux/file.h>
#include <linux/dmapool.h>
#include <uapi/linux/lightnvm.h>

enum {
	/* HW Responsibilities */
	NVM_RSP_L2P = 1 << 0,
	NVM_RSP_ECC = 1 << 1,

	/* Physical Addressing Mode */
	NVM_ADDRMODE_LINEAR = 0,
	NVM_ADDRMODE_CHANNEL = 1,

	/* Plane programming mode for LUN */
	NVM_PLANE_SINGLE = 1,
	NVM_PLANE_DOUBLE = 2,
	NVM_PLANE_QUAD = 4,

	/* Status codes */
	NVM_RSP_SUCCESS = 0x0,
	NVM_RSP_NOT_CHANGEABLE = 0x1,
	NVM_RSP_ERR_FAILWRITE = 0x40ff,
	NVM_RSP_ERR_EMPTYPAGE = 0x42ff,

	/* Device opcodes */
	NVM_OP_HBREAD = 0x02,
	NVM_OP_HBWRITE = 0x81,
	NVM_OP_PWRITE = 0x91,
	NVM_OP_PREAD = 0x92,
	NVM_OP_ERASE = 0x90,

	/* PPA Command Flags */
	NVM_IO_SNGL_ACCESS = 0x0,
	NVM_IO_DUAL_ACCESS = 0x1,
	NVM_IO_QUAD_ACCESS = 0x2,

	/* NAND Access Modes */
	NVM_IO_SUSPEND = 0x80,
	NVM_IO_SLC_MODE = 0x100,
	NVM_IO_SCRAMBLE_DISABLE = 0x200,

	/* Block Types */
	NVM_BLK_T_FREE = 0x0,
	NVM_BLK_T_BAD = 0x1,
	NVM_BLK_T_GRWN_BAD = 0x2,
	NVM_BLK_T_DEV = 0x4,
	NVM_BLK_T_HOST = 0x8,

	/* Memory capabilities */
	NVM_ID_CAP_SLC = 0x1,
	NVM_ID_CAP_CMD_SUSPEND = 0x2,
	NVM_ID_CAP_SCRAMBLE = 0x4,
	NVM_ID_CAP_ENCRYPT = 0x8,

	/* Memory types */
	NVM_ID_FMTYPE_SLC = 0,
	NVM_ID_FMTYPE_MLC = 1,

	/* Device capabilities */
	NVM_ID_DCAP_BBLKMGMT = 0x1,
	NVM_UD_DCAP_ECC = 0x2,
};
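
/*
 * Illustrative note: a PPA command's flags combine one access mode
 * with optional NAND access mode bits, e.g. a single-plane SLC read
 * with scrambling disabled:
 *
 *	rqd->flags = NVM_IO_SNGL_ACCESS | NVM_IO_SLC_MODE |
 *			NVM_IO_SCRAMBLE_DISABLE;
 */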

struct nvm_id_lp_mlc {
	u16 num_pairs;
	u8 pairs[886];
};

struct nvm_id_lp_tbl {
	__u8 id[8];
	struct nvm_id_lp_mlc mlc;
};

struct nvm_id_group {
	u8 mtype;
	u8 fmtype;
	u8 num_ch;
	u8 num_lun;
	u8 num_pln;
	u16 num_blk;
	u16 num_pg;
	u16 fpg_sz;
	u16 csecs;
	u16 sos;
	u32 trdt;
	u32 trdm;
	u32 tprt;
	u32 tprm;
	u32 tbet;
	u32 tbem;
	u32 mpos;
	u32 mccap;
	u16 cpar;

	struct nvm_id_lp_tbl lptbl;
};

struct nvm_addr_format {
	u8 ch_offset;
	u8 ch_len;
	u8 lun_offset;
	u8 lun_len;
	u8 pln_offset;
	u8 pln_len;
	u8 blk_offset;
	u8 blk_len;
	u8 pg_offset;
	u8 pg_len;
	u8 sect_offset;
	u8 sect_len;
};

struct nvm_id {
	u8 ver_id;
	u8 vmnt;
	u8 cgrps;
	u32 cap;
	u32 dom;
	struct nvm_addr_format ppaf;
	struct nvm_id_group groups[4];
} __packed;

struct nvm_target {
	struct list_head list;
	struct nvm_dev *dev;
	struct nvm_tgt_type *type;
	struct gendisk *disk;
};

struct nvm_tgt_instance {
	struct nvm_tgt_type *tt;
};

#define ADDR_EMPTY (~0ULL)

#define NVM_VERSION_MAJOR 1
#define NVM_VERSION_MINOR 0
#define NVM_VERSION_PATCH 0

struct nvm_rq;
typedef void (nvm_end_io_fn)(struct nvm_rq *);

struct nvm_rq {
	struct nvm_tgt_instance *ins;
	struct nvm_dev *dev;

	struct bio *bio;

	union {
		struct ppa_addr ppa_addr;
		dma_addr_t dma_ppa_list;
	};

	struct ppa_addr *ppa_list;

	void *meta_list;
	dma_addr_t dma_meta_list;

	struct completion *wait;
	nvm_end_io_fn *end_io;

	uint8_t opcode;
	uint16_t nr_pages;
	uint16_t flags;

	u64 ppa_status; /* ppa media status */
	int error;
};

static inline struct nvm_rq *nvm_rq_from_pdu(void *pdu)
{
	return pdu - sizeof(struct nvm_rq);
}

static inline void *nvm_rq_to_pdu(struct nvm_rq *rqdata)
{
	return rqdata + 1;
}
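
/*
 * Illustrative sketch: a target co-allocates its per-request context
 * (the PDU) immediately behind the nvm_rq and uses the two helpers
 * above to move between them. struct my_tgt_rq is hypothetical:
 *
 *	struct my_tgt_rq *t = nvm_rq_to_pdu(rqd);
 *	struct nvm_rq *back = nvm_rq_from_pdu(t);	// back == rqd
 *
 * This assumes the request was allocated with room for both, e.g.
 * from a mempool of element size
 * sizeof(struct nvm_rq) + sizeof(struct my_tgt_rq).
 */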

struct nvm_block;

struct nvm_lun {
	int id;

	int lun_id;
	int chnl_id;

	/* It is up to the target to mark blocks as closed. If the target does
	 * not do it, all blocks are marked as open, and nr_open_blocks
	 * represents the number of blocks in use.
	 */
	unsigned int nr_open_blocks;	/* Number of used, writable blocks */
	unsigned int nr_closed_blocks;	/* Number of used, read-only blocks */
	unsigned int nr_free_blocks;	/* Number of unused blocks */
	unsigned int nr_bad_blocks;	/* Number of bad blocks */

	spinlock_t lock;

	struct nvm_block *blocks;
};

enum {
	NVM_BLK_ST_FREE = 0x1,		/* Free block */
	NVM_BLK_ST_OPEN = 0x2,		/* Open block - read-write */
	NVM_BLK_ST_CLOSED = 0x4,	/* Closed block - read-only */
	NVM_BLK_ST_BAD = 0x8,		/* Bad block */
};

struct nvm_block {
	struct list_head list;
	struct nvm_lun *lun;
	unsigned long id;

	void *priv;
	int state;
};

/* system block cpu representation */
struct nvm_sb_info {
	unsigned long seqnr;
	unsigned long erase_cnt;
	unsigned int version;
	char mmtype[NVM_MMTYPE_LEN];
	struct ppa_addr fs_ppa;
};

struct nvm_dev {
	struct nvm_dev_ops *ops;

	struct list_head devices;

	/* Media manager */
	struct nvmm_type *mt;
	void *mp;

	/* System blocks */
	struct nvm_sb_info sb;

	/* Device information */
	int nr_chnls;
	int nr_planes;
	int luns_per_chnl;
	int sec_per_pg; /* only sectors for a single page */
	int pgs_per_blk;
	int blks_per_lun;
	int fpg_size;
	int pfpg_size; /* size of buffer if all pages are to be read */
	int sec_size;
	int oob_size;
	int mccap;
	struct nvm_addr_format ppaf;

	/* Calculated/Cached values. These do not reflect the actual usable
	 * blocks at run-time.
	 */
	int max_rq_size;
	int plane_mode; /* drive device in single, double or quad mode */

	int sec_per_pl; /* all sectors across planes */
	int sec_per_blk;
	int sec_per_lun;

	/* lower page table */
	int lps_per_blk;
	int *lptbl;

	unsigned long total_blocks;
	unsigned long total_secs;
	int nr_luns;
	unsigned max_pages_per_blk;

	unsigned long *lun_map;
	void *dma_pool;

	struct nvm_id identity;

	/* Backend device */
	struct request_queue *q;
	char name[DISK_NAME_LEN];

	struct mutex mlock;
	spinlock_t lock;
};
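
/*
 * Illustrative note (an assumption about the core's registration
 * path, not a guarantee made by this header): the cached geometry
 * values are derived from the identify data roughly as
 *
 *	sec_per_pl  = sec_per_pg * nr_planes;
 *	sec_per_blk = sec_per_pl * pgs_per_blk;
 *	sec_per_lun = sec_per_blk * blks_per_lun;
 *	total_secs  = sec_per_lun * nr_luns;
 */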

static inline struct ppa_addr generic_to_dev_addr(struct nvm_dev *dev,
						  struct ppa_addr r)
{
	struct ppa_addr l;

	l.ppa = ((u64)r.g.blk) << dev->ppaf.blk_offset;
	l.ppa |= ((u64)r.g.pg) << dev->ppaf.pg_offset;
	l.ppa |= ((u64)r.g.sec) << dev->ppaf.sect_offset;
	l.ppa |= ((u64)r.g.pl) << dev->ppaf.pln_offset;
	l.ppa |= ((u64)r.g.lun) << dev->ppaf.lun_offset;
	l.ppa |= ((u64)r.g.ch) << dev->ppaf.ch_offset;

	return l;
}

static inline struct ppa_addr dev_to_generic_addr(struct nvm_dev *dev,
						  struct ppa_addr r)
{
	struct ppa_addr l;

	l.ppa = 0;
	/*
	 * (r.ppa >> X offset) & X len bitmask. X eq. blk, pg, etc.
	 */
	l.g.blk = (r.ppa >> dev->ppaf.blk_offset) &
					(((1 << dev->ppaf.blk_len) - 1));
	l.g.pg = (r.ppa >> dev->ppaf.pg_offset) &
					(((1 << dev->ppaf.pg_len) - 1));
	l.g.sec = (r.ppa >> dev->ppaf.sect_offset) &
					(((1 << dev->ppaf.sect_len) - 1));
	l.g.pl = (r.ppa >> dev->ppaf.pln_offset) &
					(((1 << dev->ppaf.pln_len) - 1));
	l.g.lun = (r.ppa >> dev->ppaf.lun_offset) &
					(((1 << dev->ppaf.lun_len) - 1));
	l.g.ch = (r.ppa >> dev->ppaf.ch_offset) &
					(((1 << dev->ppaf.ch_len) - 1));

	return l;
}
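
/*
 * Illustrative sketch: the two conversions above are inverses, so a
 * generic address should round-trip through the device format.
 * nvm_example_addr_roundtrip() is a hypothetical helper and assumes
 * the ppaf field lengths can hold the values used:
 */
static inline int nvm_example_addr_roundtrip(struct nvm_dev *dev,
					     struct ppa_addr gen)
{
	struct ppa_addr d = generic_to_dev_addr(dev, gen);

	return dev_to_generic_addr(dev, d).ppa == gen.ppa;
}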

static inline int ppa_empty(struct ppa_addr ppa_addr)
{
	return (ppa_addr.ppa == ADDR_EMPTY);
}

static inline void ppa_set_empty(struct ppa_addr *ppa_addr)
{
	ppa_addr->ppa = ADDR_EMPTY;
}

static inline struct ppa_addr block_to_ppa(struct nvm_dev *dev,
					   struct nvm_block *blk)
{
	struct ppa_addr ppa;
	struct nvm_lun *lun = blk->lun;

	ppa.ppa = 0;
	ppa.g.blk = blk->id % dev->blks_per_lun;
	ppa.g.lun = lun->lun_id;
	ppa.g.ch = lun->chnl_id;

	return ppa;
}
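
/*
 * Illustrative sketch of the inverse of block_to_ppa(). This is a
 * hypothetical helper that assumes the media manager numbers luns
 * channel-major and blocks lun-major, as the modulo in block_to_ppa()
 * suggests:
 */
static inline unsigned long nvm_example_ppa_to_blk_id(struct nvm_dev *dev,
						      struct ppa_addr ppa)
{
	unsigned long lun = ppa.g.ch * dev->luns_per_chnl + ppa.g.lun;

	return lun * dev->blks_per_lun + ppa.g.blk;
}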

static inline int ppa_to_slc(struct nvm_dev *dev, int slc_pg)
{
	return dev->lptbl[slc_pg];
}

typedef blk_qc_t (nvm_tgt_make_rq_fn)(struct request_queue *, struct bio *);
typedef sector_t (nvm_tgt_capacity_fn)(void *);
typedef void *(nvm_tgt_init_fn)(struct nvm_dev *, struct gendisk *, int, int);
typedef void (nvm_tgt_exit_fn)(void *);

struct nvm_tgt_type {
	const char *name;
	unsigned int version[3];

	/* target entry points */
	nvm_tgt_make_rq_fn *make_rq;
	nvm_tgt_capacity_fn *capacity;
	nvm_end_io_fn *end_io;

	/* module-specific init/teardown */
	nvm_tgt_init_fn *init;
	nvm_tgt_exit_fn *exit;

	/* For internal use */
	struct list_head list;
};

extern int nvm_register_tgt_type(struct nvm_tgt_type *);
extern void nvm_unregister_tgt_type(struct nvm_tgt_type *);
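
/*
 * Illustrative sketch: a target module registers its nvm_tgt_type on
 * load and unregisters it on unload. The my_tgt_* names are
 * hypothetical; the in-tree rrpc target is a complete example:
 *
 *	static struct nvm_tgt_type tt_my_tgt = {
 *		.name		= "my_tgt",
 *		.version	= {1, 0, 0},
 *		.make_rq	= my_tgt_make_rq,
 *		.capacity	= my_tgt_capacity,
 *		.end_io		= my_tgt_end_io,
 *		.init		= my_tgt_init,
 *		.exit		= my_tgt_exit,
 *	};
 *
 * with nvm_register_tgt_type(&tt_my_tgt) called from module_init()
 * and nvm_unregister_tgt_type(&tt_my_tgt) from module_exit().
 */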

extern void *nvm_dev_dma_alloc(struct nvm_dev *, gfp_t, dma_addr_t *);
extern void nvm_dev_dma_free(struct nvm_dev *, void *, dma_addr_t);

typedef int (nvmm_register_fn)(struct nvm_dev *);
typedef void (nvmm_unregister_fn)(struct nvm_dev *);
typedef struct nvm_block *(nvmm_get_blk_fn)(struct nvm_dev *,
					struct nvm_lun *, unsigned long);
typedef void (nvmm_put_blk_fn)(struct nvm_dev *, struct nvm_block *);
typedef int (nvmm_open_blk_fn)(struct nvm_dev *, struct nvm_block *);
typedef int (nvmm_close_blk_fn)(struct nvm_dev *, struct nvm_block *);
typedef void (nvmm_flush_blk_fn)(struct nvm_dev *, struct nvm_block *);
typedef int (nvmm_submit_io_fn)(struct nvm_dev *, struct nvm_rq *);
typedef int (nvmm_erase_blk_fn)(struct nvm_dev *, struct nvm_block *,
								unsigned long);
typedef void (nvmm_mark_blk_fn)(struct nvm_dev *, struct ppa_addr, int);
typedef struct nvm_lun *(nvmm_get_lun_fn)(struct nvm_dev *, int);
typedef int (nvmm_reserve_lun)(struct nvm_dev *, int);
typedef void (nvmm_release_lun)(struct nvm_dev *, int);
typedef void (nvmm_lun_info_print_fn)(struct nvm_dev *);

typedef int (nvmm_get_area_fn)(struct nvm_dev *, sector_t *, sector_t);
typedef void (nvmm_put_area_fn)(struct nvm_dev *, sector_t);

struct nvmm_type {
	const char *name;
	unsigned int version[3];

	nvmm_register_fn *register_mgr;
	nvmm_unregister_fn *unregister_mgr;

	/* Block administration callbacks */
	nvmm_get_blk_fn *get_blk_unlocked;
	nvmm_put_blk_fn *put_blk_unlocked;
	nvmm_get_blk_fn *get_blk;
	nvmm_put_blk_fn *put_blk;
	nvmm_open_blk_fn *open_blk;
	nvmm_close_blk_fn *close_blk;
	nvmm_flush_blk_fn *flush_blk;

	nvmm_submit_io_fn *submit_io;
	nvmm_erase_blk_fn *erase_blk;

	/* Bad block mgmt */
	nvmm_mark_blk_fn *mark_blk;

	/* Configuration management */
	nvmm_get_lun_fn *get_lun;
	nvmm_reserve_lun *reserve_lun;
	nvmm_release_lun *release_lun;

	/* Statistics */
	nvmm_lun_info_print_fn *lun_info_print;

	nvmm_get_area_fn *get_area;
	nvmm_put_area_fn *put_area;

	struct list_head list;
};

extern int nvm_register_mgr(struct nvmm_type *);
extern void nvm_unregister_mgr(struct nvmm_type *);

extern struct nvm_block *nvm_get_blk_unlocked(struct nvm_dev *,
					struct nvm_lun *, unsigned long);
extern void nvm_put_blk_unlocked(struct nvm_dev *, struct nvm_block *);

extern struct nvm_block *nvm_get_blk(struct nvm_dev *, struct nvm_lun *,
							unsigned long);
extern void nvm_put_blk(struct nvm_dev *, struct nvm_block *);

extern int nvm_register(struct request_queue *, char *,
						struct nvm_dev_ops *);
extern void nvm_unregister(char *);

extern int nvm_submit_io(struct nvm_dev *, struct nvm_rq *);
extern void nvm_generic_to_addr_mode(struct nvm_dev *, struct nvm_rq *);
extern void nvm_addr_to_generic_mode(struct nvm_dev *, struct nvm_rq *);
extern int nvm_set_rqd_ppalist(struct nvm_dev *, struct nvm_rq *,
					struct ppa_addr *, int, int);
extern void nvm_free_rqd_ppalist(struct nvm_dev *, struct nvm_rq *);
extern int nvm_erase_ppa(struct nvm_dev *, struct ppa_addr *, int);
extern int nvm_erase_blk(struct nvm_dev *, struct nvm_block *);
extern void nvm_end_io(struct nvm_rq *, int);
extern int nvm_submit_ppa(struct nvm_dev *, struct ppa_addr *, int, int, int,
								void *, int);
extern int nvm_submit_ppa_list(struct nvm_dev *, struct ppa_addr *, int, int,
							int, void *, int);
extern int nvm_bb_tbl_fold(struct nvm_dev *, u8 *, int);
extern int nvm_get_bb_tbl(struct nvm_dev *, struct ppa_addr, u8 *);

/* sysblk.c */
#define NVM_SYSBLK_MAGIC 0x4E564D53 /* "NVMS" */

/* system block on disk representation */
struct nvm_system_block {
	__be32 magic;			/* magic signature */
	__be32 seqnr;			/* sequence number */
	__be32 erase_cnt;		/* erase count */
	__be16 version;			/* version number */
	u8 mmtype[NVM_MMTYPE_LEN];	/* media manager name */
	__be64 fs_ppa;			/* PPA for media manager
					 * superblock */
};
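
/*
 * Illustrative sketch: the on-disk fields are big-endian, so a reader
 * validates a candidate system block by its magic. The helper is
 * hypothetical:
 */
static inline int nvm_example_sysblk_valid(struct nvm_system_block *sb)
{
	return be32_to_cpu(sb->magic) == NVM_SYSBLK_MAGIC;
}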

extern int nvm_get_sysblock(struct nvm_dev *, struct nvm_sb_info *);
extern int nvm_update_sysblock(struct nvm_dev *, struct nvm_sb_info *);
extern int nvm_init_sysblock(struct nvm_dev *, struct nvm_sb_info *);

extern int nvm_dev_factory(struct nvm_dev *, int flags);

#define nvm_for_each_lun_ppa(dev, ppa, chid, lunid)			\
	for ((chid) = 0, (ppa).ppa = 0; (chid) < (dev)->nr_chnls;	\
					(chid)++, (ppa).g.ch = (chid))	\
		for ((lunid) = 0; (lunid) < (dev)->luns_per_chnl;	\
					(lunid)++, (ppa).g.lun = (lunid))
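
/*
 * Illustrative sketch: nvm_for_each_lun_ppa() yields one ppa per lun,
 * with only the channel and lun fields populated. A hypothetical
 * helper counting the luns it visits:
 */
static inline int nvm_example_count_luns(struct nvm_dev *dev)
{
	struct ppa_addr ppa;
	int ch, lun, cnt = 0;

	nvm_for_each_lun_ppa(dev, ppa, ch, lun)
		cnt++;

	return cnt;
}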

#else /* CONFIG_NVM */
struct nvm_dev_ops;

static inline int nvm_register(struct request_queue *q, char *disk_name,
			       struct nvm_dev_ops *ops)
{
	return -EINVAL;
}
static inline void nvm_unregister(char *disk_name) {}
#endif /* CONFIG_NVM */
#endif /* NVM_H */