#ifndef NVM_H
#define NVM_H

enum {
	NVM_IO_OK = 0,
	NVM_IO_REQUEUE = 1,
	NVM_IO_DONE = 2,
	NVM_IO_ERR = 3,

	NVM_IOTYPE_NONE = 0,
	NVM_IOTYPE_GC = 1,
};

#ifdef CONFIG_NVM

#include <linux/blkdev.h>
#include <linux/types.h>
#include <linux/file.h>
#include <linux/dmapool.h>

enum {
	/* HW Responsibilities */
	NVM_RSP_L2P = 1 << 0,
	NVM_RSP_ECC = 1 << 1,

	/* Physical Addressing Mode */
	NVM_ADDRMODE_LINEAR = 0,
	NVM_ADDRMODE_CHANNEL = 1,

	/* Plane programming mode for LUN */
	NVM_PLANE_SINGLE = 0,
	NVM_PLANE_DOUBLE = 1,
	NVM_PLANE_QUAD = 2,

	/* Status codes */
	NVM_RSP_SUCCESS = 0x0,
	NVM_RSP_NOT_CHANGEABLE = 0x1,
	NVM_RSP_ERR_FAILWRITE = 0x40ff,
	NVM_RSP_ERR_EMPTYPAGE = 0x42ff,

	/* Device opcodes */
	NVM_OP_HBREAD = 0x02,
	NVM_OP_HBWRITE = 0x81,
	NVM_OP_PWRITE = 0x91,
	NVM_OP_PREAD = 0x92,
	NVM_OP_ERASE = 0x90,

	/* PPA Command Flags */
	NVM_IO_SNGL_ACCESS = 0x0,
	NVM_IO_DUAL_ACCESS = 0x1,
	NVM_IO_QUAD_ACCESS = 0x2,

	NVM_IO_SUSPEND = 0x80,
	NVM_IO_SLC_MODE = 0x100,
	NVM_IO_SCRAMBLE_DISABLE = 0x200,
};

/* Device geometry and timings, as reported during device identification */
struct nvm_id_group {
	u8 mtype;		/* media type */
	u8 fmtype;		/* flash media type */
	u16 res16;
	u8 num_ch;		/* channels */
	u8 num_lun;		/* LUNs per channel */
	u8 num_pln;		/* planes per LUN */
	u16 num_blk;		/* blocks per LUN */
	u16 num_pg;		/* pages per block */
	u16 fpg_sz;		/* page size in bytes */
	u16 csecs;		/* sector size in bytes */
	u16 sos;		/* out-of-band bytes per sector */
	u32 trdt;		/* typical read time (ns) */
	u32 trdm;		/* max read time (ns) */
	u32 tprt;		/* typical program time (ns) */
	u32 tprm;		/* max program time (ns) */
	u32 tbet;		/* typical erase time (ns) */
	u32 tbem;		/* max erase time (ns) */
	u32 mpos;		/* multi-plane operation support */
	u16 cpar;
	u8 res[913];
} __packed;

/* Bit offsets and field widths of the device's physical address format */
struct nvm_addr_format {
	u8 ch_offset;
	u8 ch_len;
	u8 lun_offset;
	u8 lun_len;
	u8 pln_offset;
	u8 pln_len;
	u8 blk_offset;
	u8 blk_len;
	u8 pg_offset;
	u8 pg_len;
	u8 sect_offset;
	u8 sect_len;
	u8 res[4];
};

struct nvm_id {
	u8 ver_id;
	u8 vmnt;
	u8 cgrps;
	u8 res[5];
	u32 cap;
	u32 dom;
	struct nvm_addr_format ppaf;
	u8 ppat;
	u8 resv[224];
	struct nvm_id_group groups[4];
} __packed;

struct nvm_target {
	struct list_head list;
	struct nvm_tgt_type *type;
	struct gendisk *disk;
};

struct nvm_tgt_instance {
	struct nvm_tgt_type *tt;
};

#define ADDR_EMPTY (~0ULL)

#define NVM_VERSION_MAJOR 1
#define NVM_VERSION_MINOR 0
#define NVM_VERSION_PATCH 0

#define NVM_SEC_BITS (8)
#define NVM_PL_BITS (6)
#define NVM_PG_BITS (16)
#define NVM_BLK_BITS (16)
#define NVM_LUN_BITS (10)
#define NVM_CH_BITS (8)

struct ppa_addr {
	union {
		/* Channel-based PPA format in NAND: 4x2x2x2x8x10 */
		struct {
			u64 ch : 4;
			u64 sec : 2; /* 4 sectors per page */
			u64 pl : 2; /* 4 planes per LUN */
			u64 lun : 2; /* 4 LUNs per channel */
			u64 pg : 8; /* 256 pages per block */
			u64 blk : 10; /* 1024 blocks per plane */
			u64 resved : 36;
		} chnl;

		/* Generic structure for all addresses */
		struct {
			u64 sec : NVM_SEC_BITS;
			u64 pl : NVM_PL_BITS;
			u64 pg : NVM_PG_BITS;
			u64 blk : NVM_BLK_BITS;
			u64 lun : NVM_LUN_BITS;
			u64 ch : NVM_CH_BITS;
		} g;

		u64 ppa;
	};
} __packed;
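
/*
 * Illustrative sketch (not part of this header): the union above lets a
 * physical address be assembled field by field and then carried around,
 * or handed to the device, as a single u64. The field values below are
 * made up.
 *
 *	struct ppa_addr p;
 *
 *	p.ppa = 0;
 *	p.g.ch = 1;
 *	p.g.lun = 2;
 *	p.g.blk = 3;
 *	p.g.pg = 4;
 *	p.g.sec = 1;
 *
 * After this, p.ppa holds the packed 64-bit address.
 */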

struct nvm_rq {
	struct nvm_tgt_instance *ins;
	struct nvm_dev *dev;

	struct bio *bio;

	union {
		struct ppa_addr ppa_addr;
		dma_addr_t dma_ppa_list;
	};

	struct ppa_addr *ppa_list;

	void *metadata;
	dma_addr_t dma_metadata;

	uint8_t opcode;
	uint16_t nr_pages;
	uint16_t flags;
};

static inline struct nvm_rq *nvm_rq_from_pdu(void *pdu)
{
	return pdu - sizeof(struct nvm_rq);
}

static inline void *nvm_rq_to_pdu(struct nvm_rq *rqdata)
{
	return rqdata + 1;
}
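
/*
 * Illustrative sketch (not part of this header): a target can allocate
 * its private per-command context immediately after the nvm_rq and use
 * the two helpers above to move between them. The struct and variable
 * names below are hypothetical.
 *
 *	struct my_tgt_pdu {
 *		int nr_retries;
 *	};
 *
 *	struct nvm_rq *rqd = kzalloc(sizeof(struct nvm_rq) +
 *				sizeof(struct my_tgt_pdu), GFP_KERNEL);
 *	struct my_tgt_pdu *pdu = nvm_rq_to_pdu(rqd);
 *
 * nvm_rq_from_pdu(pdu) then returns the original rqd pointer.
 */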

struct nvm_block;

typedef int (nvm_l2p_update_fn)(u64, u32, __le64 *, void *);
typedef int (nvm_bb_update_fn)(u32, void *, unsigned int, void *);
typedef int (nvm_id_fn)(struct request_queue *, struct nvm_id *);
typedef int (nvm_get_l2p_tbl_fn)(struct request_queue *, u64, u32,
				nvm_l2p_update_fn *, void *);
typedef int (nvm_op_bb_tbl_fn)(struct request_queue *, int, unsigned int,
				nvm_bb_update_fn *, void *);
typedef int (nvm_op_set_bb_fn)(struct request_queue *, struct nvm_rq *, int);
typedef int (nvm_submit_io_fn)(struct request_queue *, struct nvm_rq *);
typedef int (nvm_erase_blk_fn)(struct request_queue *, struct nvm_rq *);
typedef void *(nvm_create_dma_pool_fn)(struct request_queue *, char *);
typedef void (nvm_destroy_dma_pool_fn)(void *);
typedef void *(nvm_dev_dma_alloc_fn)(struct request_queue *, void *, gfp_t,
				dma_addr_t *);
typedef void (nvm_dev_dma_free_fn)(void *, void *, dma_addr_t);

struct nvm_dev_ops {
	nvm_id_fn *identity;
	nvm_get_l2p_tbl_fn *get_l2p_tbl;
	nvm_op_bb_tbl_fn *get_bb_tbl;
	nvm_op_set_bb_fn *set_bb;

	nvm_submit_io_fn *submit_io;
	nvm_erase_blk_fn *erase_block;

	nvm_create_dma_pool_fn *create_dma_pool;
	nvm_destroy_dma_pool_fn *destroy_dma_pool;
	nvm_dev_dma_alloc_fn *dev_dma_alloc;
	nvm_dev_dma_free_fn *dev_dma_free;

	uint8_t max_phys_sect;
};

struct nvm_lun {
	int id;

	int lun_id;
	int chnl_id;

	unsigned int nr_free_blocks;	/* Number of unused blocks */
	struct nvm_block *blocks;

	spinlock_t lock;
};

struct nvm_block {
	struct list_head list;
	struct nvm_lun *lun;
	unsigned long id;

	void *priv;
	int type;
};

struct nvm_dev {
	struct nvm_dev_ops *ops;

	struct list_head devices;
	struct list_head online_targets;

	/* Media manager */
	struct nvmm_type *mt;
	void *mp;

	/* Device information */
	int nr_chnls;
	int nr_planes;
	int luns_per_chnl;
	int sec_per_pg; /* sectors in a single page, on a single plane */
	int pgs_per_blk;
	int blks_per_lun;
	int sec_size;
	int oob_size;
	int addr_mode;
	struct nvm_addr_format addr_format;

	/* Calculated/Cached values. These do not reflect the actual usable
	 * blocks at run-time.
	 */
	int max_rq_size;
	int plane_mode; /* drive device in single, double or quad mode */

	int sec_per_pl; /* all sectors across planes */
	int sec_per_blk;
	int sec_per_lun;

	unsigned long total_pages;
	unsigned long total_blocks;
	int nr_luns;
	unsigned int max_pages_per_blk;

	void *ppalist_pool;

	struct nvm_id identity;

	/* Backend device */
	struct request_queue *q;
	char name[DISK_NAME_LEN];
};

/* fallback conversion */
static inline struct ppa_addr __generic_to_linear_addr(struct nvm_dev *dev,
							struct ppa_addr r)
{
	struct ppa_addr l;

	l.ppa = r.g.sec +
		r.g.pg * dev->sec_per_pg +
		r.g.blk * (dev->pgs_per_blk *
				dev->sec_per_pg) +
		r.g.lun * (dev->blks_per_lun *
				dev->pgs_per_blk *
				dev->sec_per_pg) +
		r.g.ch * (dev->blks_per_lun *
				dev->pgs_per_blk *
				dev->luns_per_chnl *
				dev->sec_per_pg);

	return l;
}
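
/*
 * Worked example (hypothetical geometry, not from this header): with
 * sec_per_pg = 4, pgs_per_blk = 256, blks_per_lun = 1024 and
 * luns_per_chnl = 4, the generic address (ch 1, lun 2, blk 3, pg 4,
 * sec 1) linearizes to:
 *
 *	1 + 4*4 + 3*(256*4) + 2*(1024*256*4) + 1*(1024*256*4*4)
 *	  = 1 + 16 + 3072 + 2097152 + 4194304 = 6294545
 *
 * __linear_to_generic_addr() below performs the inverse division and
 * remainder chain, so the two conversions round-trip.
 */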

/* fallback conversion */
static inline struct ppa_addr __linear_to_generic_addr(struct nvm_dev *dev,
							struct ppa_addr r)
{
	struct ppa_addr l;
	u32 secs, pgs, blks, luns;
	sector_t ppa = r.ppa;

	l.ppa = 0;

	div_u64_rem(ppa, dev->sec_per_pg, &secs);
	l.g.sec = secs;

	sector_div(ppa, dev->sec_per_pg);
	div_u64_rem(ppa, dev->pgs_per_blk, &pgs);
	l.g.pg = pgs;

	sector_div(ppa, dev->pgs_per_blk);
	div_u64_rem(ppa, dev->blks_per_lun, &blks);
	l.g.blk = blks;

	sector_div(ppa, dev->blks_per_lun);
	div_u64_rem(ppa, dev->luns_per_chnl, &luns);
	l.g.lun = luns;

	sector_div(ppa, dev->luns_per_chnl);
	l.g.ch = ppa;

	return l;
}

static inline struct ppa_addr __generic_to_chnl_addr(struct ppa_addr r)
{
	struct ppa_addr l;

	l.ppa = 0;

	l.chnl.sec = r.g.sec;
	l.chnl.pl = r.g.pl;
	l.chnl.pg = r.g.pg;
	l.chnl.blk = r.g.blk;
	l.chnl.lun = r.g.lun;
	l.chnl.ch = r.g.ch;

	return l;
}

static inline struct ppa_addr __chnl_to_generic_addr(struct ppa_addr r)
{
	struct ppa_addr l;

	l.ppa = 0;

	l.g.sec = r.chnl.sec;
	l.g.pl = r.chnl.pl;
	l.g.pg = r.chnl.pg;
	l.g.blk = r.chnl.blk;
	l.g.lun = r.chnl.lun;
	l.g.ch = r.chnl.ch;

	return l;
}

static inline struct ppa_addr addr_to_generic_mode(struct nvm_dev *dev,
						struct ppa_addr gppa)
{
	switch (dev->addr_mode) {
	case NVM_ADDRMODE_LINEAR:
		return __linear_to_generic_addr(dev, gppa);
	case NVM_ADDRMODE_CHANNEL:
		return __chnl_to_generic_addr(gppa);
	default:
		BUG();
	}
	return gppa;
}

static inline struct ppa_addr generic_to_addr_mode(struct nvm_dev *dev,
						struct ppa_addr gppa)
{
	switch (dev->addr_mode) {
	case NVM_ADDRMODE_LINEAR:
		return __generic_to_linear_addr(dev, gppa);
	case NVM_ADDRMODE_CHANNEL:
		return __generic_to_chnl_addr(gppa);
	default:
		BUG();
	}
	return gppa;
}
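
/*
 * Illustrative usage (hypothetical caller code): addresses are kept in
 * the generic format internally and converted to the device's native
 * addressing mode just before submission, e.g.
 *
 *	rqd->ppa_addr = generic_to_addr_mode(dev, ppa);
 */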

static inline int ppa_empty(struct ppa_addr ppa_addr)
{
	return (ppa_addr.ppa == ADDR_EMPTY);
}

static inline void ppa_set_empty(struct ppa_addr *ppa_addr)
{
	ppa_addr->ppa = ADDR_EMPTY;
}

static inline struct ppa_addr block_to_ppa(struct nvm_dev *dev,
						struct nvm_block *blk)
{
	struct ppa_addr ppa;
	struct nvm_lun *lun = blk->lun;

	ppa.ppa = 0;
	ppa.g.blk = blk->id % dev->blks_per_lun;
	ppa.g.lun = lun->lun_id;
	ppa.g.ch = lun->chnl_id;

	return ppa;
}
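
/*
 * Illustrative sketch (hypothetical caller code): a target that has
 * claimed a block from the media manager can derive the block's
 * physical address, e.g. before erasing it:
 *
 *	struct nvm_block *blk = nvm_get_blk(dev, lun, 0);
 *	struct ppa_addr ppa = block_to_ppa(dev, blk);
 */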

typedef blk_qc_t (nvm_tgt_make_rq_fn)(struct request_queue *, struct bio *);
typedef sector_t (nvm_tgt_capacity_fn)(void *);
typedef int (nvm_tgt_end_io_fn)(struct nvm_rq *, int);
typedef void *(nvm_tgt_init_fn)(struct nvm_dev *, struct gendisk *, int, int);
typedef void (nvm_tgt_exit_fn)(void *);

struct nvm_tgt_type {
	const char *name;
	unsigned int version[3];

	/* target entry points */
	nvm_tgt_make_rq_fn *make_rq;
	nvm_tgt_capacity_fn *capacity;
	nvm_tgt_end_io_fn *end_io;

	/* module-specific init/teardown */
	nvm_tgt_init_fn *init;
	nvm_tgt_exit_fn *exit;

	/* For internal use */
	struct list_head list;
};
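
/*
 * Illustrative sketch (not part of this header): a minimal target
 * module fills in an nvm_tgt_type and registers it. All names below
 * are hypothetical.
 *
 *	static struct nvm_tgt_type tt_example = {
 *		.name		= "example",
 *		.version	= {1, 0, 0},
 *		.make_rq	= example_make_rq,
 *		.capacity	= example_capacity,
 *		.end_io		= example_end_io,
 *		.init		= example_init,
 *		.exit		= example_exit,
 *	};
 *
 *	static int __init example_module_init(void)
 *	{
 *		return nvm_register_target(&tt_example);
 *	}
 */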

extern int nvm_register_target(struct nvm_tgt_type *);
extern void nvm_unregister_target(struct nvm_tgt_type *);

extern void *nvm_dev_dma_alloc(struct nvm_dev *, gfp_t, dma_addr_t *);
extern void nvm_dev_dma_free(struct nvm_dev *, void *, dma_addr_t);

typedef int (nvmm_register_fn)(struct nvm_dev *);
typedef void (nvmm_unregister_fn)(struct nvm_dev *);
typedef struct nvm_block *(nvmm_get_blk_fn)(struct nvm_dev *,
					struct nvm_lun *, unsigned long);
typedef void (nvmm_put_blk_fn)(struct nvm_dev *, struct nvm_block *);
typedef int (nvmm_open_blk_fn)(struct nvm_dev *, struct nvm_block *);
typedef int (nvmm_close_blk_fn)(struct nvm_dev *, struct nvm_block *);
typedef void (nvmm_flush_blk_fn)(struct nvm_dev *, struct nvm_block *);
typedef int (nvmm_submit_io_fn)(struct nvm_dev *, struct nvm_rq *);
typedef int (nvmm_end_io_fn)(struct nvm_rq *, int);
typedef int (nvmm_erase_blk_fn)(struct nvm_dev *, struct nvm_block *,
				unsigned long);
typedef struct nvm_lun *(nvmm_get_lun_fn)(struct nvm_dev *, int);
typedef void (nvmm_free_blocks_print_fn)(struct nvm_dev *);

struct nvmm_type {
	const char *name;
	unsigned int version[3];

	nvmm_register_fn *register_mgr;
	nvmm_unregister_fn *unregister_mgr;

	/* Block administration callbacks */
	nvmm_get_blk_fn *get_blk;
	nvmm_put_blk_fn *put_blk;
	nvmm_open_blk_fn *open_blk;
	nvmm_close_blk_fn *close_blk;
	nvmm_flush_blk_fn *flush_blk;

	nvmm_submit_io_fn *submit_io;
	nvmm_end_io_fn *end_io;
	nvmm_erase_blk_fn *erase_blk;

	/* Configuration management */
	nvmm_get_lun_fn *get_lun;

	/* Statistics */
	nvmm_free_blocks_print_fn *free_blocks_print;
	struct list_head list;
};

extern int nvm_register_mgr(struct nvmm_type *);
extern void nvm_unregister_mgr(struct nvmm_type *);

extern struct nvm_block *nvm_get_blk(struct nvm_dev *, struct nvm_lun *,
							unsigned long);
extern void nvm_put_blk(struct nvm_dev *, struct nvm_block *);

extern int nvm_register(struct request_queue *, char *,
						struct nvm_dev_ops *);
extern void nvm_unregister(char *);
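
/*
 * Illustrative sketch (not part of this header): a low-level driver
 * registers its request queue once its nvm_dev_ops callbacks are in
 * place. All names below are hypothetical.
 *
 *	static struct nvm_dev_ops example_dev_ops = {
 *		.identity	= example_identity,
 *		.submit_io	= example_submit_io,
 *		.erase_block	= example_erase_block,
 *		.max_phys_sect	= 64,
 *	};
 *
 *	ret = nvm_register(q, disk->disk_name, &example_dev_ops);
 *
 * and tears down with nvm_unregister(disk->disk_name) on removal.
 */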

extern int nvm_submit_io(struct nvm_dev *, struct nvm_rq *);
extern int nvm_erase_blk(struct nvm_dev *, struct nvm_block *);
#else /* CONFIG_NVM */
struct nvm_dev_ops;

static inline int nvm_register(struct request_queue *q, char *disk_name,
							struct nvm_dev_ops *ops)
{
	return -EINVAL;
}
static inline void nvm_unregister(char *disk_name) {}
#endif /* CONFIG_NVM */
#endif /* NVM_H */