#ifndef NVM_H
#define NVM_H

enum {
	NVM_IO_OK = 0,
	NVM_IO_REQUEUE = 1,
	NVM_IO_DONE = 2,
	NVM_IO_ERR = 3,

	NVM_IOTYPE_NONE = 0,
	NVM_IOTYPE_GC = 1,
};

#ifdef CONFIG_NVM

#include <linux/blkdev.h>
#include <linux/types.h>
#include <linux/file.h>
#include <linux/dmapool.h>

enum {
	/* HW Responsibilities */
	NVM_RSP_L2P = 1 << 0,
	NVM_RSP_ECC = 1 << 1,

	/* Physical Addressing Mode */
	NVM_ADDRMODE_LINEAR = 0,
	NVM_ADDRMODE_CHANNEL = 1,

	/* Plane programming mode for LUN */
	NVM_PLANE_SINGLE = 0,
	NVM_PLANE_DOUBLE = 1,
	NVM_PLANE_QUAD = 2,

	/* Status codes */
	NVM_RSP_SUCCESS = 0x0,
	NVM_RSP_NOT_CHANGEABLE = 0x1,
	NVM_RSP_ERR_FAILWRITE = 0x40ff,
	NVM_RSP_ERR_EMPTYPAGE = 0x42ff,

	/* Device opcodes */
	NVM_OP_HBREAD = 0x02,
	NVM_OP_HBWRITE = 0x81,
	NVM_OP_PWRITE = 0x91,
	NVM_OP_PREAD = 0x92,
	NVM_OP_ERASE = 0x90,

	/* PPA Command Flags */
	NVM_IO_SNGL_ACCESS = 0x0,
	NVM_IO_DUAL_ACCESS = 0x1,
	NVM_IO_QUAD_ACCESS = 0x2,

	/* NAND Access Modes */
	NVM_IO_SUSPEND = 0x80,
	NVM_IO_SLC_MODE = 0x100,
	NVM_IO_SCRAMBLE_DISABLE = 0x200,

	/* Block Types */
	NVM_BLK_T_FREE = 0x0,
	NVM_BLK_T_BAD = 0x1,
	NVM_BLK_T_GRWN_BAD = 0x2,
	NVM_BLK_T_DEV = 0x4,
	NVM_BLK_T_HOST = 0x8,
};
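
/*
 * Illustrative sketch (editor's example, not part of the original header):
 * the access-mode and NAND-mode flags above are bit flags, so a target
 * would typically OR them together when filling out the flags field of an
 * I/O request defined below, e.g.:
 *
 *	rqd->flags = NVM_IO_QUAD_ACCESS | NVM_IO_SLC_MODE;
 *
 * Which combinations a device actually accepts is device-specific.
 */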

struct nvm_id_group {
	u8 mtype;
	u8 fmtype;
	u8 num_ch;
	u8 num_lun;
	u8 num_pln;
	u16 num_blk;
	u16 num_pg;
	u16 fpg_sz;
	u16 csecs;
	u16 sos;
	u32 trdt;
	u32 trdm;
	u32 tprt;
	u32 tprm;
	u32 tbet;
	u32 tbem;
	u32 mpos;
	u32 mccap;
	u16 cpar;
};

struct nvm_addr_format {
	u8 ch_offset;
	u8 ch_len;
	u8 lun_offset;
	u8 lun_len;
	u8 pln_offset;
	u8 pln_len;
	u8 blk_offset;
	u8 blk_len;
	u8 pg_offset;
	u8 pg_len;
	u8 sect_offset;
	u8 sect_len;
};

struct nvm_id {
	u8 ver_id;
	u8 vmnt;
	u8 cgrps;
	u32 cap;
	u32 dom;
	struct nvm_addr_format ppaf;
	struct nvm_id_group groups[4];
} __packed;

struct nvm_target {
	struct list_head list;
	struct nvm_tgt_type *type;
	struct gendisk *disk;
};

struct nvm_tgt_instance {
	struct nvm_tgt_type *tt;
};

#define ADDR_EMPTY (~0ULL)

#define NVM_VERSION_MAJOR 1
#define NVM_VERSION_MINOR 0
#define NVM_VERSION_PATCH 0

#define NVM_BLK_BITS (16)
#define NVM_PG_BITS (16)
#define NVM_SEC_BITS (8)
#define NVM_PL_BITS (8)
#define NVM_LUN_BITS (8)
#define NVM_CH_BITS (8)

struct ppa_addr {
	/* Generic structure for all addresses */
	union {
		struct {
			u64 blk : NVM_BLK_BITS;
			u64 pg : NVM_PG_BITS;
			u64 sec : NVM_SEC_BITS;
			u64 pl : NVM_PL_BITS;
			u64 lun : NVM_LUN_BITS;
			u64 ch : NVM_CH_BITS;
		} g;

		u64 ppa;
	};
};
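
/*
 * Illustrative sketch (editor's example, not part of the original header):
 * the union lets callers compose an address field by field through .g and
 * then carry the packed 64-bit value around through .ppa:
 *
 *	struct ppa_addr p;
 *
 *	p.ppa = 0;		(clear all fields first)
 *	p.g.ch = 1;
 *	p.g.lun = 0;
 *	p.g.blk = 42;
 *	p.g.pg = 7;
 *
 * p.ppa now holds the whole address. Note that this generic layout is
 * distinct from the device's own bit layout; see generic_to_dev_addr()
 * and dev_to_generic_addr() below for the translation.
 */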

struct nvm_rq;
typedef void (nvm_end_io_fn)(struct nvm_rq *);

struct nvm_rq {
	struct nvm_tgt_instance *ins;
	struct nvm_dev *dev;

	struct bio *bio;

	union {
		struct ppa_addr ppa_addr;
		dma_addr_t dma_ppa_list;
	};

	struct ppa_addr *ppa_list;

	void *metadata;
	dma_addr_t dma_metadata;

	struct completion *wait;
	nvm_end_io_fn *end_io;

	uint8_t opcode;
	uint16_t nr_pages;
	uint16_t flags;

	int error;
};

static inline struct nvm_rq *nvm_rq_from_pdu(void *pdu)
{
	return pdu - sizeof(struct nvm_rq);
}

static inline void *nvm_rq_to_pdu(struct nvm_rq *rqdata)
{
	return rqdata + 1;
}
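
/*
 * The two helpers above assume the request's private data (PDU) is placed
 * immediately after the struct nvm_rq itself, i.e. both live in a single
 * allocation of sizeof(struct nvm_rq) + pdu size bytes:
 *
 *	[ struct nvm_rq | per-target PDU ... ]
 *
 * Illustrative sketch (hypothetical target code, not part of this header;
 * struct my_pdu is a made-up name for whatever the target stores per
 * request):
 *
 *	struct nvm_rq *rqd = kzalloc(sizeof(*rqd) + sizeof(struct my_pdu),
 *				     GFP_KERNEL);
 *	struct my_pdu *pdu = nvm_rq_to_pdu(rqd);
 *
 * In the completion path, nvm_rq_from_pdu(pdu) recovers the request.
 */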

struct nvm_block;

typedef int (nvm_l2p_update_fn)(u64, u32, __le64 *, void *);
typedef int (nvm_bb_update_fn)(struct ppa_addr, int, u8 *, void *);
typedef int (nvm_id_fn)(struct nvm_dev *, struct nvm_id *);
typedef int (nvm_get_l2p_tbl_fn)(struct nvm_dev *, u64, u32,
				nvm_l2p_update_fn *, void *);
typedef int (nvm_op_bb_tbl_fn)(struct nvm_dev *, struct ppa_addr, int,
				nvm_bb_update_fn *, void *);
typedef int (nvm_op_set_bb_fn)(struct nvm_dev *, struct nvm_rq *, int);
typedef int (nvm_submit_io_fn)(struct nvm_dev *, struct nvm_rq *);
typedef int (nvm_erase_blk_fn)(struct nvm_dev *, struct nvm_rq *);
typedef void *(nvm_create_dma_pool_fn)(struct nvm_dev *, char *);
typedef void (nvm_destroy_dma_pool_fn)(void *);
typedef void *(nvm_dev_dma_alloc_fn)(struct nvm_dev *, void *, gfp_t,
				dma_addr_t *);
typedef void (nvm_dev_dma_free_fn)(void *, void *, dma_addr_t);

struct nvm_dev_ops {
	nvm_id_fn *identity;
	nvm_get_l2p_tbl_fn *get_l2p_tbl;
	nvm_op_bb_tbl_fn *get_bb_tbl;
	nvm_op_set_bb_fn *set_bb_tbl;

	nvm_submit_io_fn *submit_io;
	nvm_erase_blk_fn *erase_block;

	nvm_create_dma_pool_fn *create_dma_pool;
	nvm_destroy_dma_pool_fn *destroy_dma_pool;
	nvm_dev_dma_alloc_fn *dev_dma_alloc;
	nvm_dev_dma_free_fn *dev_dma_free;

	unsigned int max_phys_sect;
};
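
/*
 * Illustrative sketch (hypothetical driver code, not part of this header):
 * a device driver advertises its capabilities by filling out a static ops
 * table and registering its request queue, e.g.:
 *
 *	static struct nvm_dev_ops my_nvm_dev_ops = {
 *		.identity	= my_identity,
 *		.get_l2p_tbl	= my_get_l2p_tbl,
 *		.get_bb_tbl	= my_get_bb_tbl,
 *		.set_bb_tbl	= my_set_bb_tbl,
 *		.submit_io	= my_submit_io,
 *		.erase_block	= my_erase_block,
 *		.max_phys_sect	= 64,
 *	};
 *
 *	ret = nvm_register(q, disk->disk_name, &my_nvm_dev_ops);
 *
 * The my_* callbacks are made-up names; nvm_register() is declared below.
 * The DMA pool callbacks may additionally be provided when the driver
 * manages its own PPA-list allocations.
 */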

struct nvm_lun {
	int id;

	int lun_id;
	int chnl_id;

	unsigned int nr_inuse_blocks;	/* Number of used blocks */
	unsigned int nr_free_blocks;	/* Number of unused blocks */
	unsigned int nr_bad_blocks;	/* Number of bad blocks */
	struct nvm_block *blocks;

	spinlock_t lock;
};

struct nvm_block {
	struct list_head list;
	struct nvm_lun *lun;
	unsigned long id;

	void *priv;
	int type;
};

struct nvm_dev {
	struct nvm_dev_ops *ops;

	struct list_head devices;
	struct list_head online_targets;

	/* Media manager */
	struct nvmm_type *mt;
	void *mp;

	/* Device information */
	int nr_chnls;
	int nr_planes;
	int luns_per_chnl;
	int sec_per_pg; /* only sectors for a single page */
	int pgs_per_blk;
	int blks_per_lun;
	int sec_size;
	int oob_size;
	struct nvm_addr_format ppaf;

	/* Calculated/Cached values. These do not reflect the actual usable
	 * blocks at run-time.
	 */
	int max_rq_size;
	int plane_mode; /* drive device in single, double or quad mode */

	int sec_per_pl; /* all sectors across planes */
	int sec_per_blk;
	int sec_per_lun;

	unsigned long total_pages;
	unsigned long total_blocks;
	int nr_luns;
	unsigned max_pages_per_blk;

	void *ppalist_pool;

	struct nvm_id identity;

	/* Backend device */
	struct request_queue *q;
	char name[DISK_NAME_LEN];
};

static inline struct ppa_addr generic_to_dev_addr(struct nvm_dev *dev,
						struct ppa_addr r)
{
	struct ppa_addr l;

	l.ppa = ((u64)r.g.blk) << dev->ppaf.blk_offset;
	l.ppa |= ((u64)r.g.pg) << dev->ppaf.pg_offset;
	l.ppa |= ((u64)r.g.sec) << dev->ppaf.sect_offset;
	l.ppa |= ((u64)r.g.pl) << dev->ppaf.pln_offset;
	l.ppa |= ((u64)r.g.lun) << dev->ppaf.lun_offset;
	l.ppa |= ((u64)r.g.ch) << dev->ppaf.ch_offset;

	return l;
}

static inline struct ppa_addr dev_to_generic_addr(struct nvm_dev *dev,
						struct ppa_addr r)
{
	struct ppa_addr l;

	/*
	 * (r.ppa >> X offset) & X len bitmask. X eq. blk, pg, etc.
	 */
	l.g.blk = (r.ppa >> dev->ppaf.blk_offset) &
					(((1 << dev->ppaf.blk_len) - 1));
	l.g.pg = (r.ppa >> dev->ppaf.pg_offset) &
					(((1 << dev->ppaf.pg_len) - 1));
	l.g.sec = (r.ppa >> dev->ppaf.sect_offset) &
					(((1 << dev->ppaf.sect_len) - 1));
	l.g.pl = (r.ppa >> dev->ppaf.pln_offset) &
					(((1 << dev->ppaf.pln_len) - 1));
	l.g.lun = (r.ppa >> dev->ppaf.lun_offset) &
					(((1 << dev->ppaf.lun_len) - 1));
	l.g.ch = (r.ppa >> dev->ppaf.ch_offset) &
					(((1 << dev->ppaf.ch_len) - 1));

	return l;
}
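
/*
 * The two helpers above translate between the generic ppa_addr layout and
 * the device's own bit layout as described by dev->ppaf, and are inverses
 * of each other. Illustrative sketch (hypothetical usage, not part of
 * this header):
 *
 *	struct ppa_addr gen, dev_fmt;
 *
 *	gen.ppa = 0;
 *	gen.g.ch = 2;
 *	gen.g.blk = 100;
 *	dev_fmt = generic_to_dev_addr(dev, gen);
 *
 * dev_to_generic_addr(dev, dev_fmt).ppa == gen.ppa then holds, provided
 * each field fits within the length given in dev->ppaf.
 */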

static inline int ppa_empty(struct ppa_addr ppa_addr)
{
	return (ppa_addr.ppa == ADDR_EMPTY);
}

static inline void ppa_set_empty(struct ppa_addr *ppa_addr)
{
	ppa_addr->ppa = ADDR_EMPTY;
}

static inline struct ppa_addr block_to_ppa(struct nvm_dev *dev,
						struct nvm_block *blk)
{
	struct ppa_addr ppa;
	struct nvm_lun *lun = blk->lun;

	ppa.ppa = 0;
	ppa.g.blk = blk->id % dev->blks_per_lun;
	ppa.g.lun = lun->lun_id;
	ppa.g.ch = lun->chnl_id;

	return ppa;
}

typedef blk_qc_t (nvm_tgt_make_rq_fn)(struct request_queue *, struct bio *);
typedef sector_t (nvm_tgt_capacity_fn)(void *);
typedef void *(nvm_tgt_init_fn)(struct nvm_dev *, struct gendisk *, int, int);
typedef void (nvm_tgt_exit_fn)(void *);

struct nvm_tgt_type {
	const char *name;
	unsigned int version[3];

	/* target entry points */
	nvm_tgt_make_rq_fn *make_rq;
	nvm_tgt_capacity_fn *capacity;
	nvm_end_io_fn *end_io;

	/* module-specific init/teardown */
	nvm_tgt_init_fn *init;
	nvm_tgt_exit_fn *exit;

	/* For internal use */
	struct list_head list;
};
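
/*
 * Illustrative sketch (hypothetical target module, not part of this
 * header): a target such as a host-side FTL registers itself at module
 * load time, e.g.:
 *
 *	static struct nvm_tgt_type tt_mytgt = {
 *		.name		= "mytgt",
 *		.version	= {1, 0, 0},
 *		.make_rq	= mytgt_make_rq,
 *		.capacity	= mytgt_capacity,
 *		.end_io		= mytgt_end_io,
 *		.init		= mytgt_init,
 *		.exit		= mytgt_exit,
 *	};
 *
 * with nvm_register_target(&tt_mytgt) called from module init and
 * nvm_unregister_target(&tt_mytgt) from module exit. The mytgt_* names
 * are made up; the registration functions are declared below.
 */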

extern int nvm_register_target(struct nvm_tgt_type *);
extern void nvm_unregister_target(struct nvm_tgt_type *);

extern void *nvm_dev_dma_alloc(struct nvm_dev *, gfp_t, dma_addr_t *);
extern void nvm_dev_dma_free(struct nvm_dev *, void *, dma_addr_t);

typedef int (nvmm_register_fn)(struct nvm_dev *);
typedef void (nvmm_unregister_fn)(struct nvm_dev *);
typedef struct nvm_block *(nvmm_get_blk_fn)(struct nvm_dev *,
					struct nvm_lun *, unsigned long);
typedef void (nvmm_put_blk_fn)(struct nvm_dev *, struct nvm_block *);
typedef int (nvmm_open_blk_fn)(struct nvm_dev *, struct nvm_block *);
typedef int (nvmm_close_blk_fn)(struct nvm_dev *, struct nvm_block *);
typedef void (nvmm_flush_blk_fn)(struct nvm_dev *, struct nvm_block *);
typedef int (nvmm_submit_io_fn)(struct nvm_dev *, struct nvm_rq *);
typedef int (nvmm_erase_blk_fn)(struct nvm_dev *, struct nvm_block *,
					unsigned long);
typedef struct nvm_lun *(nvmm_get_lun_fn)(struct nvm_dev *, int);
typedef void (nvmm_lun_info_print_fn)(struct nvm_dev *);

struct nvmm_type {
	const char *name;
	unsigned int version[3];

	nvmm_register_fn *register_mgr;
	nvmm_unregister_fn *unregister_mgr;

	/* Block administration callbacks */
	nvmm_get_blk_fn *get_blk;
	nvmm_put_blk_fn *put_blk;
	nvmm_open_blk_fn *open_blk;
	nvmm_close_blk_fn *close_blk;
	nvmm_flush_blk_fn *flush_blk;

	nvmm_submit_io_fn *submit_io;
	nvmm_erase_blk_fn *erase_blk;

	/* Configuration management */
	nvmm_get_lun_fn *get_lun;

	/* Statistics */
	nvmm_lun_info_print_fn *lun_info_print;
	struct list_head list;
};

extern int nvm_register_mgr(struct nvmm_type *);
extern void nvm_unregister_mgr(struct nvmm_type *);

extern struct nvm_block *nvm_get_blk(struct nvm_dev *, struct nvm_lun *,
							unsigned long);
extern void nvm_put_blk(struct nvm_dev *, struct nvm_block *);

extern int nvm_register(struct request_queue *, char *,
						struct nvm_dev_ops *);
extern void nvm_unregister(char *);

extern int nvm_submit_io(struct nvm_dev *, struct nvm_rq *);
extern void nvm_generic_to_addr_mode(struct nvm_dev *, struct nvm_rq *);
extern void nvm_addr_to_generic_mode(struct nvm_dev *, struct nvm_rq *);
extern int nvm_set_rqd_ppalist(struct nvm_dev *, struct nvm_rq *,
						struct ppa_addr *, int);
extern void nvm_free_rqd_ppalist(struct nvm_dev *, struct nvm_rq *);
extern int nvm_erase_ppa(struct nvm_dev *, struct ppa_addr *, int);
extern int nvm_erase_blk(struct nvm_dev *, struct nvm_block *);
extern void nvm_end_io(struct nvm_rq *, int);
extern int nvm_submit_ppa(struct nvm_dev *, struct ppa_addr *, int, int, int,
						void *, int);
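
/*
 * Illustrative sketch (hypothetical driver code, not part of this header):
 * lifecycle of a device registration against the functions above:
 *
 *	ret = nvm_register(q, "nvme0n1", &my_nvm_dev_ops);
 *	if (ret)
 *		goto err;
 *	...
 *	nvm_unregister("nvme0n1");
 *
 * "nvme0n1" and my_nvm_dev_ops are placeholders; the disk name keys the
 * device for later lookup and unregistration.
 */
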
#else /* CONFIG_NVM */
struct nvm_dev_ops;

static inline int nvm_register(struct request_queue *q, char *disk_name,
							struct nvm_dev_ops *ops)
{
	return -EINVAL;
}
static inline void nvm_unregister(char *disk_name) {}
#endif /* CONFIG_NVM */
#endif /* NVM_H */