Commit | Line | Data |
---|---|---|
cd9e9808 MB |
1 | #ifndef NVM_H |
2 | #define NVM_H | |
3 | ||
a7fd9a4f JA |
4 | #include <linux/types.h> |
5 | ||
cd9e9808 MB |
enum {
	/* Return values used on target IO submission paths */
	NVM_IO_OK = 0,		/* IO handled successfully */
	NVM_IO_REQUEUE = 1,	/* IO should be requeued and retried */
	NVM_IO_DONE = 2,	/* IO complete; no further processing */
	NVM_IO_ERR = 3,		/* IO failed */

	/* IO classification */
	NVM_IOTYPE_NONE = 0,	/* regular IO */
	NVM_IOTYPE_GC = 1,	/* IO issued on behalf of garbage collection */
};
15 | ||
a7fd9a4f JA |
/* Bit widths of the components of a generic physical page address (PPA).
 * They sum to exactly 64 bits, so the bitfield view below aliases the raw
 * u64 'ppa' view one-to-one.
 */
#define NVM_BLK_BITS (16)
#define NVM_PG_BITS (16)
#define NVM_SEC_BITS (8)
#define NVM_PL_BITS (8)
#define NVM_LUN_BITS (8)
#define NVM_CH_BITS (8)

struct ppa_addr {
	/* Generic structure for all addresses */
	union {
		struct {
			u64 blk		: NVM_BLK_BITS;	/* block */
			u64 pg		: NVM_PG_BITS;	/* page within block */
			u64 sec		: NVM_SEC_BITS;	/* sector within page */
			u64 pl		: NVM_PL_BITS;	/* plane */
			u64 lun		: NVM_LUN_BITS;	/* logical unit */
			u64 ch		: NVM_CH_BITS;	/* channel */
		} g;

		u64 ppa;	/* raw 64-bit view of the same address */
	};
};
38 | ||
39 | struct nvm_rq; | |
40 | struct nvm_id; | |
41 | struct nvm_dev; | |
42 | ||
43 | typedef int (nvm_l2p_update_fn)(u64, u32, __le64 *, void *); | |
22e8c976 MB |
44 | typedef int (nvm_bb_update_fn)(struct nvm_dev *, struct ppa_addr, u8 *, int, |
45 | void *); | |
a7fd9a4f JA |
46 | typedef int (nvm_id_fn)(struct nvm_dev *, struct nvm_id *); |
47 | typedef int (nvm_get_l2p_tbl_fn)(struct nvm_dev *, u64, u32, | |
48 | nvm_l2p_update_fn *, void *); | |
22e8c976 | 49 | typedef int (nvm_op_bb_tbl_fn)(struct nvm_dev *, struct ppa_addr, |
a7fd9a4f JA |
50 | nvm_bb_update_fn *, void *); |
51 | typedef int (nvm_op_set_bb_fn)(struct nvm_dev *, struct nvm_rq *, int); | |
52 | typedef int (nvm_submit_io_fn)(struct nvm_dev *, struct nvm_rq *); | |
53 | typedef int (nvm_erase_blk_fn)(struct nvm_dev *, struct nvm_rq *); | |
54 | typedef void *(nvm_create_dma_pool_fn)(struct nvm_dev *, char *); | |
55 | typedef void (nvm_destroy_dma_pool_fn)(void *); | |
56 | typedef void *(nvm_dev_dma_alloc_fn)(struct nvm_dev *, void *, gfp_t, | |
57 | dma_addr_t *); | |
58 | typedef void (nvm_dev_dma_free_fn)(void *, void*, dma_addr_t); | |
59 | ||
/* Entry points a low-level device driver supplies when registering a
 * LightNVM-capable device.
 */
struct nvm_dev_ops {
	nvm_id_fn		*identity;	/* query device identity/geometry */
	nvm_get_l2p_tbl_fn	*get_l2p_tbl;	/* read logical-to-physical table */
	nvm_op_bb_tbl_fn	*get_bb_tbl;	/* read bad block table */
	nvm_op_set_bb_fn	*set_bb_tbl;	/* update bad block table */

	nvm_submit_io_fn	*submit_io;	/* submit a physical IO */
	nvm_erase_blk_fn	*erase_block;	/* erase a block */

	nvm_create_dma_pool_fn	*create_dma_pool;
	nvm_destroy_dma_pool_fn	*destroy_dma_pool;
	nvm_dev_dma_alloc_fn	*dev_dma_alloc;
	nvm_dev_dma_free_fn	*dev_dma_free;

	/* Maximum number of physical sectors supported in a single request */
	unsigned int		max_phys_sect;
};
76 | ||
77 | ||
78 | ||
cd9e9808 MB |
79 | #ifdef CONFIG_NVM |
80 | ||
81 | #include <linux/blkdev.h> | |
cd9e9808 MB |
82 | #include <linux/file.h> |
83 | #include <linux/dmapool.h> | |
e3eb3799 | 84 | #include <uapi/linux/lightnvm.h> |
cd9e9808 MB |
85 | |
enum {
	/* HW Responsibilities */
	NVM_RSP_L2P	= 1 << 0,
	NVM_RSP_ECC	= 1 << 1,

	/* Physical Addressing Mode */
	NVM_ADDRMODE_LINEAR	= 0,
	NVM_ADDRMODE_CHANNEL	= 1,

	/* Plane programming mode for LUN */
	NVM_PLANE_SINGLE	= 1,
	NVM_PLANE_DOUBLE	= 2,
	NVM_PLANE_QUAD		= 4,

	/* Status codes */
	NVM_RSP_SUCCESS		= 0x0,
	NVM_RSP_NOT_CHANGEABLE	= 0x1,
	NVM_RSP_ERR_FAILWRITE	= 0x40ff,
	NVM_RSP_ERR_EMPTYPAGE	= 0x42ff,

	/* Device opcodes (HB = hybrid, P = physical addressing) */
	NVM_OP_HBREAD		= 0x02,
	NVM_OP_HBWRITE		= 0x81,
	NVM_OP_PWRITE		= 0x91,
	NVM_OP_PREAD		= 0x92,
	NVM_OP_ERASE		= 0x90,

	/* PPA Command Flags */
	NVM_IO_SNGL_ACCESS	= 0x0,
	NVM_IO_DUAL_ACCESS	= 0x1,
	NVM_IO_QUAD_ACCESS	= 0x2,

	/* NAND Access Modes */
	NVM_IO_SUSPEND		= 0x80,
	NVM_IO_SLC_MODE		= 0x100,
	NVM_IO_SCRAMBLE_DISABLE	= 0x200,

	/* Block Types */
	NVM_BLK_T_FREE		= 0x0,
	NVM_BLK_T_BAD		= 0x1,
	NVM_BLK_T_GRWN_BAD	= 0x2,	/* grown bad block */
	NVM_BLK_T_DEV		= 0x4,	/* reserved by the device */
	NVM_BLK_T_HOST		= 0x8,	/* reserved by the host */

	/* Memory capabilities */
	NVM_ID_CAP_SLC		= 0x1,
	NVM_ID_CAP_CMD_SUSPEND	= 0x2,
	NVM_ID_CAP_SCRAMBLE	= 0x4,
	NVM_ID_CAP_ENCRYPT	= 0x8,

	/* Memory types */
	NVM_ID_FMTYPE_SLC	= 0,
	NVM_ID_FMTYPE_MLC	= 1,

	/* Device capabilities */
	NVM_ID_DCAP_BBLKMGMT	= 0x1,
	/* NOTE(review): "UD" looks like a typo for "ID", but renaming the
	 * constant would break existing users — keep as-is. */
	NVM_UD_DCAP_ECC		= 0x2,
};
144 | ||
/* MLC lower/upper page pairing data reported by device identification */
struct nvm_id_lp_mlc {
	u16 num_pairs;	/* number of valid entries in pairs[] */
	u8 pairs[886];
};
149 | ||
/* Lower page table block from device identification */
struct nvm_id_lp_tbl {
	__u8 id[8];
	struct nvm_id_lp_mlc mlc;
};
154 | ||
/* Per-group geometry, timing and capabilities from device identification */
struct nvm_id_group {
	u8	mtype;		/* media type */
	u8	fmtype;		/* flash media type (see NVM_ID_FMTYPE_*) */
	u8	num_ch;		/* number of channels */
	u8	num_lun;	/* luns per channel — TODO confirm against spec */
	u8	num_pln;	/* planes */
	u16	num_blk;	/* blocks */
	u16	num_pg;		/* pages per block */
	u16	fpg_sz;		/* flash page size in bytes */
	u16	csecs;		/* sector size in bytes */
	u16	sos;		/* out-of-band size per sector */
	/* Timing fields: trdt/trdm (read), tprt/tprm (program), tbet/tbem
	 * (erase) — presumably typical/max pairs; verify units against the
	 * identify specification. */
	u32	trdt;
	u32	trdm;
	u32	tprt;
	u32	tprm;
	u32	tbet;
	u32	tbem;
	u32	mpos;		/* multi-plane operation modes */
	u32	mccap;		/* media capabilities (see NVM_ID_CAP_*) */
	u16	cpar;

	struct nvm_id_lp_tbl lptbl;	/* lower page table */
};
cd9e9808 MB |
178 | |
/* Bit offset and bit length of each PPA component within the device's
 * native 64-bit address. Consumed by generic_to_dev_addr() and
 * dev_to_generic_addr().
 */
struct nvm_addr_format {
	u8 ch_offset;
	u8 ch_len;
	u8 lun_offset;
	u8 lun_len;
	u8 pln_offset;
	u8 pln_len;
	u8 blk_offset;
	u8 blk_len;
	u8 pg_offset;
	u8 pg_len;
	u8 sect_offset;
	u8 sect_len;
};
193 | ||
/* Device identification data. __packed: presumably mirrors the on-wire
 * identify layout, so no compiler padding is allowed — verify against the
 * identify command format. */
struct nvm_id {
	u8	ver_id;		/* version id */
	u8	vmnt;
	u8	cgrps;		/* number of valid entries in groups[] */
	u32	cap;		/* capabilities */
	u32	dom;
	struct nvm_addr_format ppaf;	/* device PPA bit layout */
	struct nvm_id_group groups[4];
} __packed;
203 | ||
/* An instantiated target exposed as a block device */
struct nvm_target {
	struct list_head list;		/* list linkage */
	struct nvm_tgt_type *type;	/* target type implementation */
	struct gendisk *disk;		/* block device exposed to the host */
};
209 | ||
/* Links a request (see nvm_rq->ins) back to the target type handling it */
struct nvm_tgt_instance {
	struct nvm_tgt_type *tt;
};
213 | ||
214 | #define ADDR_EMPTY (~0ULL) | |
215 | ||
216 | #define NVM_VERSION_MAJOR 1 | |
217 | #define NVM_VERSION_MINOR 0 | |
218 | #define NVM_VERSION_PATCH 0 | |
219 | ||
91276162 | 220 | struct nvm_rq; |
72d256ec | 221 | typedef void (nvm_end_io_fn)(struct nvm_rq *); |
91276162 | 222 | |
cd9e9808 MB |
/* A LightNVM request: carries the bio, its physical address list and the
 * associated DMA mappings through submission and completion. */
struct nvm_rq {
	struct nvm_tgt_instance *ins;	/* issuing target instance */
	struct nvm_dev *dev;

	struct bio *bio;

	union {
		struct ppa_addr ppa_addr;	/* single address, or ... */
		dma_addr_t dma_ppa_list;	/* DMA handle of ppa_list */
	};

	struct ppa_addr *ppa_list;	/* addresses for multi-page requests */

	void *metadata;			/* out-of-band metadata buffer */
	dma_addr_t dma_metadata;	/* DMA mapping of metadata */

	struct completion *wait;	/* optional completion for sync waits */
	nvm_end_io_fn *end_io;		/* completion callback */

	uint8_t opcode;			/* NVM_OP_* device opcode */
	uint16_t nr_pages;
	uint16_t flags;

	u64 ppa_status;			/* ppa media status */
	int error;
};
249 | ||
250 | static inline struct nvm_rq *nvm_rq_from_pdu(void *pdu) | |
251 | { | |
252 | return pdu - sizeof(struct nvm_rq); | |
253 | } | |
254 | ||
255 | static inline void *nvm_rq_to_pdu(struct nvm_rq *rqdata) | |
256 | { | |
257 | return rqdata + 1; | |
258 | } | |
259 | ||
260 | struct nvm_block; | |
261 | ||
cd9e9808 MB |
/* Per-LUN bookkeeping kept by the media manager */
struct nvm_lun {
	int id;		/* presumably the device-global lun number — verify */

	int lun_id;	/* lun index within its channel (see block_to_ppa()) */
	int chnl_id;	/* owning channel */

	/* It is up to the target to mark blocks as closed. If the target does
	 * not do it, all blocks are marked as open, and nr_open_blocks
	 * represents the number of blocks in use
	 */
	unsigned int nr_open_blocks;	/* Number of used, writable blocks */
	unsigned int nr_closed_blocks;	/* Number of used, read-only blocks */
	unsigned int nr_free_blocks;	/* Number of unused blocks */
	unsigned int nr_bad_blocks;	/* Number of bad blocks */

	spinlock_t lock;	/* NOTE(review): presumably protects the
				 * counters above — confirm locking scope */

	struct nvm_block *blocks;	/* blocks belonging to this lun */
};
281 | ||
/* Run-time block states kept in nvm_block->state */
enum {
	NVM_BLK_ST_FREE = 0x1,		/* Free block */
	NVM_BLK_ST_OPEN = 0x2,		/* Open block - read-write */
	NVM_BLK_ST_CLOSED = 0x4,	/* Closed block - read-only */
	NVM_BLK_ST_BAD = 0x8,		/* Bad block */
};
288 | ||
/* A single erase block and its state */
struct nvm_block {
	struct list_head list;	/* list linkage */
	struct nvm_lun *lun;	/* owning lun */
	unsigned long id;	/* global block id; the in-lun index is
				 * id % blks_per_lun (see block_to_ppa()) */

	void *priv;		/* owner-private data */
	int state;		/* NVM_BLK_ST_* */
};
297 | ||
e3eb3799 MB |
/* system block cpu representation (see struct nvm_system_block for the
 * on-disk, big-endian form) */
struct nvm_sb_info {
	unsigned long seqnr;		/* sequence number */
	unsigned long erase_cnt;	/* erase count */
	unsigned int version;		/* version number */
	char mmtype[NVM_MMTYPE_LEN];	/* media manager name */
	struct ppa_addr fs_ppa;		/* PPA of media manager superblock */
};
306 | ||
cd9e9808 MB |
/* Core per-device structure: ties the driver ops, the media manager and
 * the cached device geometry together. */
struct nvm_dev {
	struct nvm_dev_ops *ops;	/* low-level driver entry points */

	struct list_head devices;	/* global device list linkage */
	struct list_head online_targets;	/* targets on this device */

	/* Media manager */
	struct nvmm_type *mt;
	void *mp;			/* media manager private data */

	/* System blocks */
	struct nvm_sb_info sb;

	/* Device information */
	int nr_chnls;
	int nr_planes;
	int luns_per_chnl;
	int sec_per_pg; /* only sectors for a single page */
	int pgs_per_blk;
	int blks_per_lun;
	int fpg_size;
	int pfpg_size; /* size of buffer if all pages are to be read */
	int sec_size;
	int oob_size;
	int mccap;	/* media capabilities — presumably NVM_ID_CAP_* bits */
	struct nvm_addr_format ppaf;	/* device-native PPA bit layout */

	/* Calculated/Cached values. These do not reflect the actual usable
	 * blocks at run-time.
	 */
	int max_rq_size;
	int plane_mode; /* drive device in single, double or quad mode */

	int sec_per_pl; /* all sectors across planes */
	int sec_per_blk;
	int sec_per_lun;

	/* lower page table */
	int lps_per_blk;
	int *lptbl;

	unsigned long total_blocks;
	unsigned long total_secs;
	int nr_luns;
	unsigned max_pages_per_blk;

	unsigned long *lun_map;	/* presumably a bitmap of reserved luns
				 * (see nvmm_reserve_lun) — verify */
	void *ppalist_pool;	/* DMA pool for ppa lists */

	struct nvm_id identity;

	/* Backend device */
	struct request_queue *q;
	char name[DISK_NAME_LEN];

	struct mutex mlock;
	spinlock_t lock;
};
365 | ||
7386af27 MB |
366 | static inline struct ppa_addr generic_to_dev_addr(struct nvm_dev *dev, |
367 | struct ppa_addr r) | |
cd9e9808 MB |
368 | { |
369 | struct ppa_addr l; | |
370 | ||
7386af27 MB |
371 | l.ppa = ((u64)r.g.blk) << dev->ppaf.blk_offset; |
372 | l.ppa |= ((u64)r.g.pg) << dev->ppaf.pg_offset; | |
373 | l.ppa |= ((u64)r.g.sec) << dev->ppaf.sect_offset; | |
374 | l.ppa |= ((u64)r.g.pl) << dev->ppaf.pln_offset; | |
375 | l.ppa |= ((u64)r.g.lun) << dev->ppaf.lun_offset; | |
376 | l.ppa |= ((u64)r.g.ch) << dev->ppaf.ch_offset; | |
cd9e9808 MB |
377 | |
378 | return l; | |
379 | } | |
380 | ||
7386af27 MB |
381 | static inline struct ppa_addr dev_to_generic_addr(struct nvm_dev *dev, |
382 | struct ppa_addr r) | |
cd9e9808 MB |
383 | { |
384 | struct ppa_addr l; | |
385 | ||
7386af27 MB |
386 | /* |
387 | * (r.ppa << X offset) & X len bitmask. X eq. blk, pg, etc. | |
388 | */ | |
389 | l.g.blk = (r.ppa >> dev->ppaf.blk_offset) & | |
390 | (((1 << dev->ppaf.blk_len) - 1)); | |
391 | l.g.pg |= (r.ppa >> dev->ppaf.pg_offset) & | |
392 | (((1 << dev->ppaf.pg_len) - 1)); | |
393 | l.g.sec |= (r.ppa >> dev->ppaf.sect_offset) & | |
394 | (((1 << dev->ppaf.sect_len) - 1)); | |
395 | l.g.pl |= (r.ppa >> dev->ppaf.pln_offset) & | |
396 | (((1 << dev->ppaf.pln_len) - 1)); | |
397 | l.g.lun |= (r.ppa >> dev->ppaf.lun_offset) & | |
398 | (((1 << dev->ppaf.lun_len) - 1)); | |
399 | l.g.ch |= (r.ppa >> dev->ppaf.ch_offset) & | |
400 | (((1 << dev->ppaf.ch_len) - 1)); | |
cd9e9808 MB |
401 | |
402 | return l; | |
403 | } | |
404 | ||
cd9e9808 MB |
405 | static inline int ppa_empty(struct ppa_addr ppa_addr) |
406 | { | |
407 | return (ppa_addr.ppa == ADDR_EMPTY); | |
408 | } | |
409 | ||
/* Mark a PPA as unassigned using the all-ones sentinel (see ppa_empty()). */
static inline void ppa_set_empty(struct ppa_addr *ppa_addr)
{
	ppa_addr->ppa = ADDR_EMPTY;
}
414 | ||
415 | static inline struct ppa_addr block_to_ppa(struct nvm_dev *dev, | |
416 | struct nvm_block *blk) | |
417 | { | |
418 | struct ppa_addr ppa; | |
419 | struct nvm_lun *lun = blk->lun; | |
420 | ||
421 | ppa.ppa = 0; | |
422 | ppa.g.blk = blk->id % dev->blks_per_lun; | |
423 | ppa.g.lun = lun->lun_id; | |
424 | ppa.g.ch = lun->chnl_id; | |
425 | ||
426 | return ppa; | |
427 | } | |
428 | ||
e3eb3799 MB |
/* Translate an SLC page index through the device's lower page table
 * (dev->lptbl). */
static inline int ppa_to_slc(struct nvm_dev *dev, int slc_pg)
{
	return dev->lptbl[slc_pg];
}
433 | ||
dece1635 | 434 | typedef blk_qc_t (nvm_tgt_make_rq_fn)(struct request_queue *, struct bio *); |
cd9e9808 | 435 | typedef sector_t (nvm_tgt_capacity_fn)(void *); |
cd9e9808 MB |
436 | typedef void *(nvm_tgt_init_fn)(struct nvm_dev *, struct gendisk *, int, int); |
437 | typedef void (nvm_tgt_exit_fn)(void *); | |
438 | ||
/* A target type: translates block-device IO into LightNVM requests */
struct nvm_tgt_type {
	const char *name;
	unsigned int version[3];

	/* target entry points */
	nvm_tgt_make_rq_fn *make_rq;	/* bio submission entry point */
	nvm_tgt_capacity_fn *capacity;	/* exposed capacity in sectors */
	nvm_end_io_fn *end_io;		/* request completion callback */

	/* module-specific init/teardown */
	nvm_tgt_init_fn *init;
	nvm_tgt_exit_fn *exit;

	/* For internal use */
	struct list_head list;
};
455 | ||
456 | extern int nvm_register_target(struct nvm_tgt_type *); | |
457 | extern void nvm_unregister_target(struct nvm_tgt_type *); | |
458 | ||
459 | extern void *nvm_dev_dma_alloc(struct nvm_dev *, gfp_t, dma_addr_t *); | |
460 | extern void nvm_dev_dma_free(struct nvm_dev *, void *, dma_addr_t); | |
461 | ||
462 | typedef int (nvmm_register_fn)(struct nvm_dev *); | |
463 | typedef void (nvmm_unregister_fn)(struct nvm_dev *); | |
464 | typedef struct nvm_block *(nvmm_get_blk_fn)(struct nvm_dev *, | |
465 | struct nvm_lun *, unsigned long); | |
466 | typedef void (nvmm_put_blk_fn)(struct nvm_dev *, struct nvm_block *); | |
467 | typedef int (nvmm_open_blk_fn)(struct nvm_dev *, struct nvm_block *); | |
468 | typedef int (nvmm_close_blk_fn)(struct nvm_dev *, struct nvm_block *); | |
469 | typedef void (nvmm_flush_blk_fn)(struct nvm_dev *, struct nvm_block *); | |
470 | typedef int (nvmm_submit_io_fn)(struct nvm_dev *, struct nvm_rq *); | |
cd9e9808 MB |
471 | typedef int (nvmm_erase_blk_fn)(struct nvm_dev *, struct nvm_block *, |
472 | unsigned long); | |
473 | typedef struct nvm_lun *(nvmm_get_lun_fn)(struct nvm_dev *, int); | |
da1e2849 WT |
474 | typedef int (nvmm_reserve_lun)(struct nvm_dev *, int); |
475 | typedef void (nvmm_release_lun)(struct nvm_dev *, int); | |
2fde0e48 | 476 | typedef void (nvmm_lun_info_print_fn)(struct nvm_dev *); |
cd9e9808 | 477 | |
4c9dacb8 WT |
478 | typedef int (nvmm_get_area_fn)(struct nvm_dev *, sector_t *, sector_t); |
479 | typedef void (nvmm_put_area_fn)(struct nvm_dev *, sector_t); | |
480 | ||
cd9e9808 MB |
/* A media manager: owns block allocation and submission policy for the
 * devices it registers with. */
struct nvmm_type {
	const char *name;
	unsigned int version[3];

	nvmm_register_fn *register_mgr;
	nvmm_unregister_fn *unregister_mgr;

	/* Block administration callbacks. The *_unlocked variants
	 * presumably skip internal locking — confirm caller requirements. */
	nvmm_get_blk_fn *get_blk_unlocked;
	nvmm_put_blk_fn *put_blk_unlocked;
	nvmm_get_blk_fn *get_blk;
	nvmm_put_blk_fn *put_blk;
	nvmm_open_blk_fn *open_blk;
	nvmm_close_blk_fn *close_blk;
	nvmm_flush_blk_fn *flush_blk;

	nvmm_submit_io_fn *submit_io;	/* submit an IO through the manager */
	nvmm_erase_blk_fn *erase_blk;	/* erase a block */

	/* Configuration management */
	nvmm_get_lun_fn *get_lun;
	nvmm_reserve_lun *reserve_lun;
	nvmm_release_lun *release_lun;

	/* Statistics */
	nvmm_lun_info_print_fn *lun_info_print;

	nvmm_get_area_fn *get_area;
	nvmm_put_area_fn *put_area;

	struct list_head list;
};
513 | ||
514 | extern int nvm_register_mgr(struct nvmm_type *); | |
515 | extern void nvm_unregister_mgr(struct nvmm_type *); | |
516 | ||
ff0e498b JG |
517 | extern struct nvm_block *nvm_get_blk_unlocked(struct nvm_dev *, |
518 | struct nvm_lun *, unsigned long); | |
519 | extern void nvm_put_blk_unlocked(struct nvm_dev *, struct nvm_block *); | |
520 | ||
cd9e9808 MB |
521 | extern struct nvm_block *nvm_get_blk(struct nvm_dev *, struct nvm_lun *, |
522 | unsigned long); | |
523 | extern void nvm_put_blk(struct nvm_dev *, struct nvm_block *); | |
524 | ||
525 | extern int nvm_register(struct request_queue *, char *, | |
526 | struct nvm_dev_ops *); | |
527 | extern void nvm_unregister(char *); | |
528 | ||
529 | extern int nvm_submit_io(struct nvm_dev *, struct nvm_rq *); | |
069368e9 MB |
530 | extern void nvm_generic_to_addr_mode(struct nvm_dev *, struct nvm_rq *); |
531 | extern void nvm_addr_to_generic_mode(struct nvm_dev *, struct nvm_rq *); | |
abd805ec MB |
532 | extern int nvm_set_rqd_ppalist(struct nvm_dev *, struct nvm_rq *, |
533 | struct ppa_addr *, int); | |
534 | extern void nvm_free_rqd_ppalist(struct nvm_dev *, struct nvm_rq *); | |
81e681d3 | 535 | extern int nvm_erase_ppa(struct nvm_dev *, struct ppa_addr *, int); |
cd9e9808 | 536 | extern int nvm_erase_blk(struct nvm_dev *, struct nvm_block *); |
91276162 | 537 | extern void nvm_end_io(struct nvm_rq *, int); |
09719b62 MB |
538 | extern int nvm_submit_ppa(struct nvm_dev *, struct ppa_addr *, int, int, int, |
539 | void *, int); | |
1145e635 MB |
540 | extern int nvm_submit_ppa_list(struct nvm_dev *, struct ppa_addr *, int, int, |
541 | int, void *, int); | |
22e8c976 | 542 | extern int nvm_bb_tbl_fold(struct nvm_dev *, u8 *, int); |
e3eb3799 MB |
543 | |
544 | /* sysblk.c */ | |
545 | #define NVM_SYSBLK_MAGIC 0x4E564D53 /* "NVMS" */ | |
546 | ||
547 | /* system block on disk representation */ | |
/* system block on disk representation; multi-byte fields are stored
 * big-endian (see struct nvm_sb_info for the cpu form) */
struct nvm_system_block {
	__be32 magic;			/* magic signature (NVM_SYSBLK_MAGIC) */
	__be32 seqnr;			/* sequence number */
	__be32 erase_cnt;		/* erase count */
	__be16 version;			/* version number */
	u8 mmtype[NVM_MMTYPE_LEN];	/* media manager name */
	__be64 fs_ppa;			/* PPA for media manager
					 * superblock */
};
557 | ||
558 | extern int nvm_get_sysblock(struct nvm_dev *, struct nvm_sb_info *); | |
559 | extern int nvm_update_sysblock(struct nvm_dev *, struct nvm_sb_info *); | |
560 | extern int nvm_init_sysblock(struct nvm_dev *, struct nvm_sb_info *); | |
8b4970c4 MB |
561 | |
562 | extern int nvm_dev_factory(struct nvm_dev *, int flags); | |
cd9e9808 MB |
563 | #else /* CONFIG_NVM */ |
564 | struct nvm_dev_ops; | |
565 | ||
/* Stubs used when LightNVM support is compiled out (!CONFIG_NVM):
 * registration is rejected and unregistration is a no-op. */
static inline int nvm_register(struct request_queue *q, char *disk_name,
							struct nvm_dev_ops *ops)
{
	return -EINVAL;
}
static inline void nvm_unregister(char *disk_name) {}
572 | #endif /* CONFIG_NVM */ | |
573 | #endif /* LIGHTNVM.H */ |