lightnvm: prevent double free on init error
linux-2.6-block.git: include/linux/lightnvm.h
#ifndef NVM_H
#define NVM_H

enum {
	NVM_IO_OK = 0,
	NVM_IO_REQUEUE = 1,
	NVM_IO_DONE = 2,
	NVM_IO_ERR = 3,

	NVM_IOTYPE_NONE = 0,
	NVM_IOTYPE_GC = 1,
};

#ifdef CONFIG_NVM

#include <linux/blkdev.h>
#include <linux/types.h>
#include <linux/file.h>
#include <linux/dmapool.h>

enum {
	/* HW Responsibilities */
	NVM_RSP_L2P = 1 << 0,
	NVM_RSP_ECC = 1 << 1,
	/* Physical Addressing Mode */
	NVM_ADDRMODE_LINEAR = 0,
	NVM_ADDRMODE_CHANNEL = 1,

	/* Plane programming mode for LUN */
	NVM_PLANE_SINGLE = 0,
	NVM_PLANE_DOUBLE = 1,
	NVM_PLANE_QUAD = 2,

	/* Status codes */
	NVM_RSP_SUCCESS = 0x0,
	NVM_RSP_NOT_CHANGEABLE = 0x1,
	NVM_RSP_ERR_FAILWRITE = 0x40ff,
	NVM_RSP_ERR_EMPTYPAGE = 0x42ff,

	/* Device opcodes */
	NVM_OP_HBREAD = 0x02,
	NVM_OP_HBWRITE = 0x81,
	NVM_OP_PWRITE = 0x91,
	NVM_OP_PREAD = 0x92,
	NVM_OP_ERASE = 0x90,

	/* PPA Command Flags */
	NVM_IO_SNGL_ACCESS = 0x0,
	NVM_IO_DUAL_ACCESS = 0x1,
	NVM_IO_QUAD_ACCESS = 0x2,

	NVM_IO_SUSPEND = 0x80,
	NVM_IO_SLC_MODE = 0x100,
	NVM_IO_SCRAMBLE_DISABLE = 0x200,
};

struct nvm_id_group {
	u8 mtype;
	u8 fmtype;
	u8 num_ch;
	u8 num_lun;
	u8 num_pln;
	u16 num_blk;
	u16 num_pg;
	u16 fpg_sz;
	u16 csecs;
	u16 sos;
	u32 trdt;
	u32 trdm;
	u32 tprt;
	u32 tprm;
	u32 tbet;
	u32 tbem;
	u32 mpos;
	u32 mccap;
	u16 cpar;
};

struct nvm_addr_format {
	u8 ch_offset;
	u8 ch_len;
	u8 lun_offset;
	u8 lun_len;
	u8 pln_offset;
	u8 pln_len;
	u8 blk_offset;
	u8 blk_len;
	u8 pg_offset;
	u8 pg_len;
	u8 sect_offset;
	u8 sect_len;
};

struct nvm_id {
	u8 ver_id;
	u8 vmnt;
	u8 cgrps;
	u32 cap;
	u32 dom;
	struct nvm_addr_format ppaf;
	u8 ppat;
	struct nvm_id_group groups[4];
} __packed;
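
/*
 * Note (added comment, not in the original header): cgrps is understood to
 * give the number of valid entries in groups[] above, of which the device
 * reports at most four.
 */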

struct nvm_target {
	struct list_head list;
	struct nvm_tgt_type *type;
	struct gendisk *disk;
};

struct nvm_tgt_instance {
	struct nvm_tgt_type *tt;
};

#define ADDR_EMPTY (~0ULL)

#define NVM_VERSION_MAJOR 1
#define NVM_VERSION_MINOR 0
#define NVM_VERSION_PATCH 0

#define NVM_SEC_BITS (8)
#define NVM_PL_BITS (6)
#define NVM_PG_BITS (16)
#define NVM_BLK_BITS (16)
#define NVM_LUN_BITS (10)
#define NVM_CH_BITS (8)

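/*
 * Added note: the generic-format field widths above (8 + 6 + 16 + 16 + 10 + 8)
 * sum to 64 bits, so struct ppa_addr below can expose the same address either
 * as individual fields or as a single u64.
 */
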
struct ppa_addr {
	union {
		/* Channel-based PPA format in nand 4x2x2x2x8x10 */
		struct {
			u64 ch		: 4;
			u64 sec		: 2; /* 4 sectors per page */
			u64 pl		: 2; /* 4 planes per LUN */
			u64 lun		: 2; /* 4 LUNs per channel */
			u64 pg		: 8; /* 256 pages per block */
			u64 blk		: 10;/* 1024 blocks per plane */
			u64 resved	: 36;
		} chnl;

		/* Generic structure for all addresses */
		struct {
			u64 sec		: NVM_SEC_BITS;
			u64 pl		: NVM_PL_BITS;
			u64 pg		: NVM_PG_BITS;
			u64 blk		: NVM_BLK_BITS;
			u64 lun		: NVM_LUN_BITS;
			u64 ch		: NVM_CH_BITS;
		} g;

		u64 ppa;
	};
} __packed;
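
/*
 * Illustrative sketch (added for clarity, not part of the original header):
 * the union above lets an address be built field-by-field through the
 * generic format and then handled as a single 64-bit value. The helper name
 * is made up for the example.
 */
static inline u64 __nvm_example_pack_ppa(unsigned int ch, unsigned int lun,
					 unsigned int blk, unsigned int pg,
					 unsigned int sec)
{
	struct ppa_addr p;

	p.ppa = 0;	/* clear every bit-field, including pl and resved */
	p.g.ch = ch;
	p.g.lun = lun;
	p.g.blk = blk;
	p.g.pg = pg;
	p.g.sec = sec;

	return p.ppa;	/* the packed 64-bit physical address */
}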

struct nvm_rq {
	struct nvm_tgt_instance *ins;
	struct nvm_dev *dev;

	struct bio *bio;

	union {
		struct ppa_addr ppa_addr;
		dma_addr_t dma_ppa_list;
	};

	struct ppa_addr *ppa_list;

	void *metadata;
	dma_addr_t dma_metadata;

	uint8_t opcode;
	uint16_t nr_pages;
	uint16_t flags;
};

static inline struct nvm_rq *nvm_rq_from_pdu(void *pdu)
{
	return pdu - sizeof(struct nvm_rq);
}

static inline void *nvm_rq_to_pdu(struct nvm_rq *rqdata)
{
	return rqdata + 1;
}
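
/*
 * Layout assumed by the two helpers above: a target's private per-request
 * data (the "pdu") is allocated immediately behind struct nvm_rq, so
 * nvm_rq_to_pdu() simply steps past the request and nvm_rq_from_pdu() steps
 * back by sizeof(struct nvm_rq).
 */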

struct nvm_block;

typedef int (nvm_l2p_update_fn)(u64, u32, __le64 *, void *);
typedef int (nvm_bb_update_fn)(struct ppa_addr, int, u8 *, void *);
typedef int (nvm_id_fn)(struct request_queue *, struct nvm_id *);
typedef int (nvm_get_l2p_tbl_fn)(struct request_queue *, u64, u32,
				nvm_l2p_update_fn *, void *);
typedef int (nvm_op_bb_tbl_fn)(struct request_queue *, struct ppa_addr, int,
				nvm_bb_update_fn *, void *);
typedef int (nvm_op_set_bb_fn)(struct request_queue *, struct nvm_rq *, int);
typedef int (nvm_submit_io_fn)(struct request_queue *, struct nvm_rq *);
typedef int (nvm_erase_blk_fn)(struct request_queue *, struct nvm_rq *);
typedef void *(nvm_create_dma_pool_fn)(struct request_queue *, char *);
typedef void (nvm_destroy_dma_pool_fn)(void *);
typedef void *(nvm_dev_dma_alloc_fn)(struct request_queue *, void *, gfp_t,
				dma_addr_t *);
typedef void (nvm_dev_dma_free_fn)(void *, void *, dma_addr_t);

struct nvm_dev_ops {
	nvm_id_fn *identity;
	nvm_get_l2p_tbl_fn *get_l2p_tbl;
	nvm_op_bb_tbl_fn *get_bb_tbl;
	nvm_op_set_bb_fn *set_bb_tbl;

	nvm_submit_io_fn *submit_io;
	nvm_erase_blk_fn *erase_block;

	nvm_create_dma_pool_fn *create_dma_pool;
	nvm_destroy_dma_pool_fn *destroy_dma_pool;
	nvm_dev_dma_alloc_fn *dev_dma_alloc;
	nvm_dev_dma_free_fn *dev_dma_free;

	unsigned int max_phys_sect;
};
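
/*
 * Minimal driver-side sketch (illustrative only; every example_* symbol
 * below is hypothetical): a device driver fills in struct nvm_dev_ops with
 * its callbacks and hands its request queue to the core with nvm_register(),
 * declared further down in this header.
 */
#if 0
static struct nvm_dev_ops example_dev_ops = {
	.identity		= example_identity,
	.get_l2p_tbl		= example_get_l2p_tbl,
	.get_bb_tbl		= example_get_bb_tbl,
	.set_bb_tbl		= example_set_bb_tbl,
	.submit_io		= example_submit_io,
	.erase_block		= example_erase_block,
	.create_dma_pool	= example_create_dma_pool,
	.destroy_dma_pool	= example_destroy_dma_pool,
	.dev_dma_alloc		= example_dev_dma_alloc,
	.dev_dma_free		= example_dev_dma_free,
	.max_phys_sect		= 64,
};

static int example_probe(struct request_queue *q, char *disk_name)
{
	/* register the device with the lightnvm core */
	return nvm_register(q, disk_name, &example_dev_ops);
}
#endif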

struct nvm_lun {
	int id;

	int lun_id;
	int chnl_id;

	unsigned int nr_free_blocks;	/* Number of unused blocks */
	struct nvm_block *blocks;

	spinlock_t lock;
};

struct nvm_block {
	struct list_head list;
	struct nvm_lun *lun;
	unsigned long id;

	void *priv;
	int type;
};

struct nvm_dev {
	struct nvm_dev_ops *ops;

	struct list_head devices;
	struct list_head online_targets;

	/* Media manager */
	struct nvmm_type *mt;
	void *mp;

	/* Device information */
	int nr_chnls;
	int nr_planes;
	int luns_per_chnl;
	int sec_per_pg; /* only sectors for a single page */
	int pgs_per_blk;
	int blks_per_lun;
	int sec_size;
	int oob_size;
	int addr_mode;
	struct nvm_addr_format addr_format;

	/* Calculated/Cached values. These do not reflect the actual usable
	 * blocks at run-time.
	 */
	int max_rq_size;
	int plane_mode; /* drive device in single, double or quad mode */

	int sec_per_pl; /* all sectors across planes */
	int sec_per_blk;
	int sec_per_lun;

	unsigned long total_pages;
	unsigned long total_blocks;
	int nr_luns;
	unsigned max_pages_per_blk;

	void *ppalist_pool;

	struct nvm_id identity;

	/* Backend device */
	struct request_queue *q;
	char name[DISK_NAME_LEN];
};

/* fallback conversion */
static struct ppa_addr __generic_to_linear_addr(struct nvm_dev *dev,
						struct ppa_addr r)
{
	struct ppa_addr l;

	l.ppa = r.g.sec +
		r.g.pg * dev->sec_per_pg +
		r.g.blk * (dev->pgs_per_blk *
			dev->sec_per_pg) +
		r.g.lun * (dev->blks_per_lun *
			dev->pgs_per_blk *
			dev->sec_per_pg) +
		r.g.ch * (dev->blks_per_lun *
			dev->pgs_per_blk *
			dev->luns_per_chnl *
			dev->sec_per_pg);

	return l;
}
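
/*
 * Spelled out (added comment, same math as above): the linear address is
 * sector-granular, with sectors nested in pages, pages in blocks, blocks in
 * LUNs and LUNs in channels:
 *
 *	linear = sec
 *	       + pg  * sec_per_pg
 *	       + blk * pgs_per_blk  * sec_per_pg
 *	       + lun * blks_per_lun * pgs_per_blk * sec_per_pg
 *	       + ch  * luns_per_chnl * blks_per_lun * pgs_per_blk * sec_per_pg
 *
 * __linear_to_generic_addr() below recovers the fields with repeated
 * div/mod by the same factors.
 */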

/* fallback conversion */
static struct ppa_addr __linear_to_generic_addr(struct nvm_dev *dev,
						struct ppa_addr r)
{
	struct ppa_addr l;
	int secs, pgs, blks, luns;
	sector_t ppa = r.ppa;

	l.ppa = 0;

	div_u64_rem(ppa, dev->sec_per_pg, &secs);
	l.g.sec = secs;

	sector_div(ppa, dev->sec_per_pg);
	div_u64_rem(ppa, dev->pgs_per_blk, &pgs);
	l.g.pg = pgs;

	sector_div(ppa, dev->pgs_per_blk);
	div_u64_rem(ppa, dev->blks_per_lun, &blks);
	l.g.blk = blks;

	sector_div(ppa, dev->blks_per_lun);
	div_u64_rem(ppa, dev->luns_per_chnl, &luns);
	l.g.lun = luns;

	sector_div(ppa, dev->luns_per_chnl);
	l.g.ch = ppa;

	return l;
}

static struct ppa_addr __generic_to_chnl_addr(struct ppa_addr r)
{
	struct ppa_addr l;

	l.ppa = 0;

	l.chnl.sec = r.g.sec;
	l.chnl.pl = r.g.pl;
	l.chnl.pg = r.g.pg;
	l.chnl.blk = r.g.blk;
	l.chnl.lun = r.g.lun;
	l.chnl.ch = r.g.ch;

	return l;
}

static struct ppa_addr __chnl_to_generic_addr(struct ppa_addr r)
{
	struct ppa_addr l;

	l.ppa = 0;

	l.g.sec = r.chnl.sec;
	l.g.pl = r.chnl.pl;
	l.g.pg = r.chnl.pg;
	l.g.blk = r.chnl.blk;
	l.g.lun = r.chnl.lun;
	l.g.ch = r.chnl.ch;

	return l;
}

static inline struct ppa_addr addr_to_generic_mode(struct nvm_dev *dev,
						struct ppa_addr gppa)
{
	switch (dev->addr_mode) {
	case NVM_ADDRMODE_LINEAR:
		return __linear_to_generic_addr(dev, gppa);
	case NVM_ADDRMODE_CHANNEL:
		return __chnl_to_generic_addr(gppa);
	default:
		BUG();
	}
	return gppa;
}

static inline struct ppa_addr generic_to_addr_mode(struct nvm_dev *dev,
						struct ppa_addr gppa)
{
	switch (dev->addr_mode) {
	case NVM_ADDRMODE_LINEAR:
		return __generic_to_linear_addr(dev, gppa);
	case NVM_ADDRMODE_CHANNEL:
		return __generic_to_chnl_addr(gppa);
	default:
		BUG();
	}
	return gppa;
}

static inline int ppa_empty(struct ppa_addr ppa_addr)
{
	return (ppa_addr.ppa == ADDR_EMPTY);
}

static inline void ppa_set_empty(struct ppa_addr *ppa_addr)
{
	ppa_addr->ppa = ADDR_EMPTY;
}

static inline struct ppa_addr block_to_ppa(struct nvm_dev *dev,
					struct nvm_block *blk)
{
	struct ppa_addr ppa;
	struct nvm_lun *lun = blk->lun;

	ppa.ppa = 0;
	ppa.g.blk = blk->id % dev->blks_per_lun;
	ppa.g.lun = lun->lun_id;
	ppa.g.ch = lun->chnl_id;

	return ppa;
}

typedef blk_qc_t (nvm_tgt_make_rq_fn)(struct request_queue *, struct bio *);
typedef sector_t (nvm_tgt_capacity_fn)(void *);
typedef int (nvm_tgt_end_io_fn)(struct nvm_rq *, int);
typedef void *(nvm_tgt_init_fn)(struct nvm_dev *, struct gendisk *, int, int);
typedef void (nvm_tgt_exit_fn)(void *);

struct nvm_tgt_type {
	const char *name;
	unsigned int version[3];

	/* target entry points */
	nvm_tgt_make_rq_fn *make_rq;
	nvm_tgt_capacity_fn *capacity;
	nvm_tgt_end_io_fn *end_io;

	/* module-specific init/teardown */
	nvm_tgt_init_fn *init;
	nvm_tgt_exit_fn *exit;

	/* For internal use */
	struct list_head list;
};

extern int nvm_register_target(struct nvm_tgt_type *);
extern void nvm_unregister_target(struct nvm_tgt_type *);
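
/*
 * Minimal target-side sketch (illustrative only; every example_* symbol is
 * hypothetical): a target module fills in struct nvm_tgt_type and registers
 * it with nvm_register_target(), typically from its module_init() hook.
 */
#if 0
static struct nvm_tgt_type tt_example = {
	.name		= "example",
	.version	= {1, 0, 0},

	.make_rq	= example_make_rq,
	.capacity	= example_capacity,
	.end_io		= example_end_io,

	.init		= example_init,
	.exit		= example_exit,
};

static int __init example_module_init(void)
{
	return nvm_register_target(&tt_example);
}
#endif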

extern void *nvm_dev_dma_alloc(struct nvm_dev *, gfp_t, dma_addr_t *);
extern void nvm_dev_dma_free(struct nvm_dev *, void *, dma_addr_t);

typedef int (nvmm_register_fn)(struct nvm_dev *);
typedef void (nvmm_unregister_fn)(struct nvm_dev *);
typedef struct nvm_block *(nvmm_get_blk_fn)(struct nvm_dev *,
					struct nvm_lun *, unsigned long);
typedef void (nvmm_put_blk_fn)(struct nvm_dev *, struct nvm_block *);
typedef int (nvmm_open_blk_fn)(struct nvm_dev *, struct nvm_block *);
typedef int (nvmm_close_blk_fn)(struct nvm_dev *, struct nvm_block *);
typedef void (nvmm_flush_blk_fn)(struct nvm_dev *, struct nvm_block *);
typedef int (nvmm_submit_io_fn)(struct nvm_dev *, struct nvm_rq *);
typedef int (nvmm_end_io_fn)(struct nvm_rq *, int);
typedef int (nvmm_erase_blk_fn)(struct nvm_dev *, struct nvm_block *,
				unsigned long);
typedef struct nvm_lun *(nvmm_get_lun_fn)(struct nvm_dev *, int);
typedef void (nvmm_free_blocks_print_fn)(struct nvm_dev *);

struct nvmm_type {
	const char *name;
	unsigned int version[3];

	nvmm_register_fn *register_mgr;
	nvmm_unregister_fn *unregister_mgr;

	/* Block administration callbacks */
	nvmm_get_blk_fn *get_blk;
	nvmm_put_blk_fn *put_blk;
	nvmm_open_blk_fn *open_blk;
	nvmm_close_blk_fn *close_blk;
	nvmm_flush_blk_fn *flush_blk;

	nvmm_submit_io_fn *submit_io;
	nvmm_end_io_fn *end_io;
	nvmm_erase_blk_fn *erase_blk;

	/* Configuration management */
	nvmm_get_lun_fn *get_lun;

	/* Statistics */
	nvmm_free_blocks_print_fn *free_blocks_print;
	struct list_head list;
};

extern int nvm_register_mgr(struct nvmm_type *);
extern void nvm_unregister_mgr(struct nvmm_type *);

extern struct nvm_block *nvm_get_blk(struct nvm_dev *, struct nvm_lun *,
					unsigned long);
extern void nvm_put_blk(struct nvm_dev *, struct nvm_block *);

extern int nvm_register(struct request_queue *, char *,
					struct nvm_dev_ops *);
extern void nvm_unregister(char *);

extern int nvm_submit_io(struct nvm_dev *, struct nvm_rq *);
extern int nvm_erase_blk(struct nvm_dev *, struct nvm_block *);
#else /* CONFIG_NVM */
struct nvm_dev_ops;

static inline int nvm_register(struct request_queue *q, char *disk_name,
						struct nvm_dev_ops *ops)
{
	return -EINVAL;
}
static inline void nvm_unregister(char *disk_name) {}
#endif /* CONFIG_NVM */
#endif /* NVM_H */