lightnvm: expose mccap in identify command
include/linux/lightnvm.h
#ifndef NVM_H
#define NVM_H

enum {
	NVM_IO_OK = 0,
	NVM_IO_REQUEUE = 1,
	NVM_IO_DONE = 2,
	NVM_IO_ERR = 3,

	NVM_IOTYPE_NONE = 0,
	NVM_IOTYPE_GC = 1,
};

#ifdef CONFIG_NVM

#include <linux/blkdev.h>
#include <linux/types.h>
#include <linux/file.h>
#include <linux/dmapool.h>
#include <linux/math64.h>	/* div_u64_rem() in the address helpers below */

enum {
	/* HW Responsibilities */
	NVM_RSP_L2P = 1 << 0,
	NVM_RSP_ECC = 1 << 1,

	/* Physical Addressing Mode */
	NVM_ADDRMODE_LINEAR = 0,
	NVM_ADDRMODE_CHANNEL = 1,

	/* Plane programming mode for LUN */
	NVM_PLANE_SINGLE = 0,
	NVM_PLANE_DOUBLE = 1,
	NVM_PLANE_QUAD = 2,

	/* Status codes */
	NVM_RSP_SUCCESS = 0x0,
	NVM_RSP_NOT_CHANGEABLE = 0x1,
	NVM_RSP_ERR_FAILWRITE = 0x40ff,
	NVM_RSP_ERR_EMPTYPAGE = 0x42ff,

	/* Device opcodes */
	NVM_OP_HBREAD = 0x02,
	NVM_OP_HBWRITE = 0x81,
	NVM_OP_PWRITE = 0x91,
	NVM_OP_PREAD = 0x92,
	NVM_OP_ERASE = 0x90,

	/* PPA Command Flags */
	NVM_IO_SNGL_ACCESS = 0x0,
	NVM_IO_DUAL_ACCESS = 0x1,
	NVM_IO_QUAD_ACCESS = 0x2,

	NVM_IO_SUSPEND = 0x80,
	NVM_IO_SLC_MODE = 0x100,
	NVM_IO_SCRAMBLE_DISABLE = 0x200,
};

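/*
 * Identity data: each nvm_id_group mirrors one group descriptor returned
 * by the device identify command. mccap (media and controller
 * capabilities) is the field this patch exposes alongside the geometry
 * and timing parameters.
 */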
struct nvm_id_group {
	u8 mtype;
	u8 fmtype;
	u16 res16;
	u8 num_ch;
	u8 num_lun;
	u8 num_pln;
	u16 num_blk;
	u16 num_pg;
	u16 fpg_sz;
	u16 csecs;
	u16 sos;
	u32 trdt;
	u32 trdm;
	u32 tprt;
	u32 tprm;
	u32 tbet;
	u32 tbem;
	u32 mpos;
	u32 mccap;
	u16 cpar;
	u8 res[913];
} __packed;

struct nvm_addr_format {
	u8 ch_offset;
	u8 ch_len;
	u8 lun_offset;
	u8 lun_len;
	u8 pln_offset;
	u8 pln_len;
	u8 blk_offset;
	u8 blk_len;
	u8 pg_offset;
	u8 pg_len;
	u8 sect_offset;
	u8 sect_len;
	u8 res[4];
};

struct nvm_id {
	u8 ver_id;
	u8 vmnt;
	u8 cgrps;
	u8 res[5];
	u32 cap;
	u32 dom;
	struct nvm_addr_format ppaf;
	u8 ppat;
	u8 resv[224];
	struct nvm_id_group groups[4];
} __packed;

struct nvm_target {
	struct list_head list;
	struct nvm_tgt_type *type;
	struct gendisk *disk;
};

struct nvm_tgt_instance {
	struct nvm_tgt_type *tt;
};

#define ADDR_EMPTY (~0ULL)

#define NVM_VERSION_MAJOR 1
#define NVM_VERSION_MINOR 0
#define NVM_VERSION_PATCH 0

#define NVM_SEC_BITS (8)
#define NVM_PL_BITS (6)
#define NVM_PG_BITS (16)
#define NVM_BLK_BITS (16)
#define NVM_LUN_BITS (10)
#define NVM_CH_BITS (8)

struct ppa_addr {
	union {
		/* Channel-based PPA format in NAND 4x2x2x2x8x10 */
		struct {
			u64 ch		: 4;
			u64 sec		: 2; /* 4 sectors per page */
			u64 pl		: 2; /* 4 planes per LUN */
			u64 lun		: 2; /* 4 LUNs per channel */
			u64 pg		: 8; /* 256 pages per block */
			u64 blk		: 10;/* 1024 blocks per plane */
			u64 reserved	: 36;
		} chnl;

		/* Generic structure for all addresses */
		struct {
			u64 sec		: NVM_SEC_BITS;
			u64 pl		: NVM_PL_BITS;
			u64 pg		: NVM_PG_BITS;
			u64 blk		: NVM_BLK_BITS;
			u64 lun		: NVM_LUN_BITS;
			u64 ch		: NVM_CH_BITS;
		} g;

		u64 ppa;
	};
} __packed;
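/*
 * All three views alias the same 64 bits: chnl is the channel-based
 * device layout above, g is the generic layout sized by the NVM_*_BITS
 * fields, and ppa is the raw value used for copies and for the
 * ADDR_EMPTY sentinel.
 */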
struct nvm_rq {
	struct nvm_tgt_instance *ins;
	struct nvm_dev *dev;

	struct bio *bio;

	union {
		struct ppa_addr ppa_addr;
		dma_addr_t dma_ppa_list;
	};

	struct ppa_addr *ppa_list;

	void *metadata;
	dma_addr_t dma_metadata;

	uint8_t opcode;
	uint16_t nr_pages;
	uint16_t flags;
};

static inline struct nvm_rq *nvm_rq_from_pdu(void *pdu)
{
	return pdu - sizeof(struct nvm_rq);
}

static inline void *nvm_rq_to_pdu(struct nvm_rq *rqdata)
{
	return rqdata + 1;
}
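/*
 * Targets embed their per-request context directly after the nvm_rq in a
 * single allocation, so both conversions above are constant pointer
 * offsets. A minimal sketch, assuming a hypothetical per-target context
 * "struct tgt_rq":
 *
 *	struct nvm_rq *rqd = kmalloc(sizeof(*rqd) + sizeof(struct tgt_rq),
 *				     GFP_KERNEL);
 *	struct tgt_rq *trq = nvm_rq_to_pdu(rqd);
 *
 *	BUG_ON(nvm_rq_from_pdu(trq) != rqd);
 */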
struct nvm_block;

typedef int (nvm_l2p_update_fn)(u64, u32, __le64 *, void *);
typedef int (nvm_bb_update_fn)(struct ppa_addr, int, u8 *, void *);
typedef int (nvm_id_fn)(struct request_queue *, struct nvm_id *);
typedef int (nvm_get_l2p_tbl_fn)(struct request_queue *, u64, u32,
				nvm_l2p_update_fn *, void *);
typedef int (nvm_op_bb_tbl_fn)(struct request_queue *, struct ppa_addr, int,
				nvm_bb_update_fn *, void *);
typedef int (nvm_op_set_bb_fn)(struct request_queue *, struct nvm_rq *, int);
typedef int (nvm_submit_io_fn)(struct request_queue *, struct nvm_rq *);
typedef int (nvm_erase_blk_fn)(struct request_queue *, struct nvm_rq *);
typedef void *(nvm_create_dma_pool_fn)(struct request_queue *, char *);
typedef void (nvm_destroy_dma_pool_fn)(void *);
typedef void *(nvm_dev_dma_alloc_fn)(struct request_queue *, void *, gfp_t,
				dma_addr_t *);
typedef void (nvm_dev_dma_free_fn)(void *, void *, dma_addr_t);

struct nvm_dev_ops {
	nvm_id_fn *identity;
	nvm_get_l2p_tbl_fn *get_l2p_tbl;
	nvm_op_bb_tbl_fn *get_bb_tbl;
	nvm_op_set_bb_fn *set_bb_tbl;

	nvm_submit_io_fn *submit_io;
	nvm_erase_blk_fn *erase_block;

	nvm_create_dma_pool_fn *create_dma_pool;
	nvm_destroy_dma_pool_fn *destroy_dma_pool;
	nvm_dev_dma_alloc_fn *dev_dma_alloc;
	nvm_dev_dma_free_fn *dev_dma_free;

	unsigned int max_phys_sect;
};
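/*
 * A device driver fills one nvm_dev_ops instance and hands it to
 * nvm_register() together with its request queue (see the declarations
 * near the end of this header). max_phys_sect bounds how many physical
 * sectors a single nvm_rq may address through its ppa_list.
 */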
struct nvm_lun {
	int id;

	int lun_id;
	int chnl_id;

	unsigned int nr_free_blocks;	/* Number of unused blocks */
	struct nvm_block *blocks;

	spinlock_t lock;
};

struct nvm_block {
	struct list_head list;
	struct nvm_lun *lun;
	unsigned long id;

	void *priv;
	int type;
};

struct nvm_dev {
	struct nvm_dev_ops *ops;

	struct list_head devices;
	struct list_head online_targets;

	/* Media manager */
	struct nvmm_type *mt;
	void *mp;

	/* Device information */
	int nr_chnls;
	int nr_planes;
	int luns_per_chnl;
	int sec_per_pg; /* only sectors for a single page */
	int pgs_per_blk;
	int blks_per_lun;
	int sec_size;
	int oob_size;
	int addr_mode;
	struct nvm_addr_format addr_format;

	/* Calculated/Cached values. These do not reflect the actual usable
	 * blocks at run-time.
	 */
	int max_rq_size;
	int plane_mode; /* drive device in single, double or quad mode */

	int sec_per_pl; /* all sectors across planes */
	int sec_per_blk;
	int sec_per_lun;

	unsigned long total_pages;
	unsigned long total_blocks;
	int nr_luns;
	unsigned int max_pages_per_blk;

	void *ppalist_pool;

	struct nvm_id identity;

	/* Backend device */
	struct request_queue *q;
	char name[DISK_NAME_LEN];
};

/* fallback conversion */
static inline struct ppa_addr __generic_to_linear_addr(struct nvm_dev *dev,
						struct ppa_addr r)
{
	struct ppa_addr l;

	l.ppa = r.g.sec +
		r.g.pg * dev->sec_per_pg +
		r.g.blk * (dev->pgs_per_blk *
			dev->sec_per_pg) +
		r.g.lun * (dev->blks_per_lun *
			dev->pgs_per_blk *
			dev->sec_per_pg) +
		r.g.ch * (dev->blks_per_lun *
			dev->pgs_per_blk *
			dev->luns_per_chnl *
			dev->sec_per_pg);

	return l;
}
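/*
 * Worked example, using the geometry from the chnl comment above
 * (4 sectors/page, 256 pages/block): sec = 2, pg = 3, blk = 1 with
 * lun = ch = 0 maps to 2 + 3 * 4 + 1 * (256 * 4) = 1038 in the linear
 * space. Note that the plane bits do not participate in this mode.
 */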
/* fallback conversion */
static inline struct ppa_addr __linear_to_generic_addr(struct nvm_dev *dev,
						struct ppa_addr r)
{
	struct ppa_addr l;
	u32 secs, pgs, blks, luns;	/* div_u64_rem() takes u32 remainders */
	sector_t ppa = r.ppa;

	l.ppa = 0;

	div_u64_rem(ppa, dev->sec_per_pg, &secs);
	l.g.sec = secs;

	sector_div(ppa, dev->sec_per_pg);
	div_u64_rem(ppa, dev->pgs_per_blk, &pgs);	/* ppa now counts pages */
	l.g.pg = pgs;

	sector_div(ppa, dev->pgs_per_blk);
	div_u64_rem(ppa, dev->blks_per_lun, &blks);
	l.g.blk = blks;

	sector_div(ppa, dev->blks_per_lun);
	div_u64_rem(ppa, dev->luns_per_chnl, &luns);
	l.g.lun = luns;

	sector_div(ppa, dev->luns_per_chnl);
	l.g.ch = ppa;

	return l;
}

static inline struct ppa_addr __generic_to_chnl_addr(struct ppa_addr r)
{
	struct ppa_addr l;

	l.ppa = 0;

	l.chnl.sec = r.g.sec;
	l.chnl.pl = r.g.pl;
	l.chnl.pg = r.g.pg;
	l.chnl.blk = r.g.blk;
	l.chnl.lun = r.g.lun;
	l.chnl.ch = r.g.ch;

	return l;
}

static inline struct ppa_addr __chnl_to_generic_addr(struct ppa_addr r)
{
	struct ppa_addr l;

	l.ppa = 0;

	l.g.sec = r.chnl.sec;
	l.g.pl = r.chnl.pl;
	l.g.pg = r.chnl.pg;
	l.g.blk = r.chnl.blk;
	l.g.lun = r.chnl.lun;
	l.g.ch = r.chnl.ch;

	return l;
}

static inline struct ppa_addr addr_to_generic_mode(struct nvm_dev *dev,
						struct ppa_addr gppa)
{
	switch (dev->addr_mode) {
	case NVM_ADDRMODE_LINEAR:
		return __linear_to_generic_addr(dev, gppa);
	case NVM_ADDRMODE_CHANNEL:
		return __chnl_to_generic_addr(gppa);
	default:
		BUG();
	}
	return gppa;
}

static inline struct ppa_addr generic_to_addr_mode(struct nvm_dev *dev,
						struct ppa_addr gppa)
{
	switch (dev->addr_mode) {
	case NVM_ADDRMODE_LINEAR:
		return __generic_to_linear_addr(dev, gppa);
	case NVM_ADDRMODE_CHANNEL:
		return __generic_to_chnl_addr(gppa);
	default:
		BUG();
	}
	return gppa;
}

static inline int ppa_empty(struct ppa_addr ppa_addr)
{
	return (ppa_addr.ppa == ADDR_EMPTY);
}

static inline void ppa_set_empty(struct ppa_addr *ppa_addr)
{
	ppa_addr->ppa = ADDR_EMPTY;
}

static inline struct ppa_addr block_to_ppa(struct nvm_dev *dev,
						struct nvm_block *blk)
{
	struct ppa_addr ppa;
	struct nvm_lun *lun = blk->lun;

	ppa.ppa = 0;
	ppa.g.blk = blk->id % dev->blks_per_lun;
	ppa.g.lun = lun->lun_id;
	ppa.g.ch = lun->chnl_id;

	return ppa;
}
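/*
 * blk->id is a device-global block number; the modulo reduces it to the
 * block index within its LUN, while the LUN and channel coordinates come
 * from the owning nvm_lun. Page and sector bits are left at zero, so the
 * result addresses the start of the block.
 */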
typedef blk_qc_t (nvm_tgt_make_rq_fn)(struct request_queue *, struct bio *);
typedef sector_t (nvm_tgt_capacity_fn)(void *);
typedef int (nvm_tgt_end_io_fn)(struct nvm_rq *, int);
typedef void *(nvm_tgt_init_fn)(struct nvm_dev *, struct gendisk *, int, int);
typedef void (nvm_tgt_exit_fn)(void *);

struct nvm_tgt_type {
	const char *name;
	unsigned int version[3];

	/* target entry points */
	nvm_tgt_make_rq_fn *make_rq;
	nvm_tgt_capacity_fn *capacity;
	nvm_tgt_end_io_fn *end_io;

	/* module-specific init/teardown */
	nvm_tgt_init_fn *init;
	nvm_tgt_exit_fn *exit;

	/* For internal use */
	struct list_head list;
};

extern int nvm_register_target(struct nvm_tgt_type *);
extern void nvm_unregister_target(struct nvm_tgt_type *);

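/*
 * A minimal registration sketch, assuming a hypothetical "example"
 * target that implements the entry points above:
 *
 *	static struct nvm_tgt_type tt_example = {
 *		.name		= "example",
 *		.version	= {1, 0, 0},
 *		.make_rq	= example_make_rq,
 *		.capacity	= example_capacity,
 *		.end_io		= example_end_io,
 *		.init		= example_init,
 *		.exit		= example_exit,
 *	};
 *
 * The target calls nvm_register_target(&tt_example) from its module init
 * and nvm_unregister_target(&tt_example) on exit.
 */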
extern void *nvm_dev_dma_alloc(struct nvm_dev *, gfp_t, dma_addr_t *);
extern void nvm_dev_dma_free(struct nvm_dev *, void *, dma_addr_t);

typedef int (nvmm_register_fn)(struct nvm_dev *);
typedef void (nvmm_unregister_fn)(struct nvm_dev *);
typedef struct nvm_block *(nvmm_get_blk_fn)(struct nvm_dev *,
					struct nvm_lun *, unsigned long);
typedef void (nvmm_put_blk_fn)(struct nvm_dev *, struct nvm_block *);
typedef int (nvmm_open_blk_fn)(struct nvm_dev *, struct nvm_block *);
typedef int (nvmm_close_blk_fn)(struct nvm_dev *, struct nvm_block *);
typedef void (nvmm_flush_blk_fn)(struct nvm_dev *, struct nvm_block *);
typedef int (nvmm_submit_io_fn)(struct nvm_dev *, struct nvm_rq *);
typedef int (nvmm_end_io_fn)(struct nvm_rq *, int);
typedef int (nvmm_erase_blk_fn)(struct nvm_dev *, struct nvm_block *,
					unsigned long);
typedef struct nvm_lun *(nvmm_get_lun_fn)(struct nvm_dev *, int);
typedef void (nvmm_free_blocks_print_fn)(struct nvm_dev *);

struct nvmm_type {
	const char *name;
	unsigned int version[3];

	nvmm_register_fn *register_mgr;
	nvmm_unregister_fn *unregister_mgr;

	/* Block administration callbacks */
	nvmm_get_blk_fn *get_blk;
	nvmm_put_blk_fn *put_blk;
	nvmm_open_blk_fn *open_blk;
	nvmm_close_blk_fn *close_blk;
	nvmm_flush_blk_fn *flush_blk;

	nvmm_submit_io_fn *submit_io;
	nvmm_end_io_fn *end_io;
	nvmm_erase_blk_fn *erase_blk;

	/* Configuration management */
	nvmm_get_lun_fn *get_lun;

	/* Statistics */
	nvmm_free_blocks_print_fn *free_blocks_print;
	struct list_head list;
};

extern int nvm_register_mgr(struct nvmm_type *);
extern void nvm_unregister_mgr(struct nvmm_type *);

extern struct nvm_block *nvm_get_blk(struct nvm_dev *, struct nvm_lun *,
					unsigned long);
extern void nvm_put_blk(struct nvm_dev *, struct nvm_block *);

extern int nvm_register(struct request_queue *, char *,
					struct nvm_dev_ops *);
extern void nvm_unregister(char *);

extern int nvm_submit_io(struct nvm_dev *, struct nvm_rq *);
extern int nvm_erase_blk(struct nvm_dev *, struct nvm_block *);
#else /* CONFIG_NVM */
struct nvm_dev_ops;

static inline int nvm_register(struct request_queue *q, char *disk_name,
					struct nvm_dev_ops *ops)
{
	return -EINVAL;
}
static inline void nvm_unregister(char *disk_name) {}
#endif /* CONFIG_NVM */
#endif /* NVM_H */