Commit | Line | Data |
---|---|---|
b2441318 | 1 | /* SPDX-License-Identifier: GPL-2.0 */ |
cd9e9808 MB |
2 | #ifndef NVM_H |
3 | #define NVM_H | |
4 | ||
b76eb20b | 5 | #include <linux/blkdev.h> |
a7fd9a4f | 6 | #include <linux/types.h> |
b76eb20b | 7 | #include <uapi/linux/lightnvm.h> |
a7fd9a4f | 8 | |
/*
 * Return codes used by target I/O paths, plus flags tagging the origin
 * of a request (regular traffic vs. garbage collection).
 */
enum {
	/* I/O handler return values */
	NVM_IO_OK = 0,		/* completed successfully */
	NVM_IO_REQUEUE = 1,	/* caller should resubmit the request */
	NVM_IO_DONE = 2,	/* finished; no further processing needed */
	NVM_IO_ERR = 3,		/* failed */

	/* I/O type tags */
	NVM_IOTYPE_NONE = 0,	/* normal I/O */
	NVM_IOTYPE_GC = 1,	/* issued on behalf of garbage collection */
};
18 | ||
/* Bit widths of the 1.2-style generic address fields in struct ppa_addr. */
#define NVM_BLK_BITS (16)
#define NVM_PG_BITS (16)
#define NVM_SEC_BITS (8)
#define NVM_PL_BITS (8)
#define NVM_LUN_BITS (8)
#define NVM_CH_BITS (7)

/*
 * Physical page address. The union lets the same 64-bit value be viewed
 * as a raw ppa, as a 1.2 geometry address (g), or as a cache-line
 * address (c) where the top bit marks a cached entry.
 */
struct ppa_addr {
	/* Generic structure for all addresses */
	union {
		/* 1.2-style device geometry address */
		struct {
			u64 blk		: NVM_BLK_BITS;
			u64 pg		: NVM_PG_BITS;
			u64 sec		: NVM_SEC_BITS;
			u64 pl		: NVM_PL_BITS;
			u64 lun		: NVM_LUN_BITS;
			u64 ch		: NVM_CH_BITS;
			u64 reserved	: 1;
		} g;

		/* cache address: line index plus cached marker bit */
		struct {
			u64 line	: 63;
			u64 is_cached	: 1;
		} c;

		u64 ppa;	/* raw 64-bit view */
	};
};
47 | ||
48 | struct nvm_rq; | |
49 | struct nvm_id; | |
50 | struct nvm_dev; | |
8e53624d | 51 | struct nvm_tgt_dev; |
a7fd9a4f | 52 | |
/* Function types a media driver implements for the lightnvm core. */
typedef int (nvm_id_fn)(struct nvm_dev *);
typedef int (nvm_op_bb_tbl_fn)(struct nvm_dev *, struct ppa_addr, u8 *);
typedef int (nvm_op_set_bb_fn)(struct nvm_dev *, struct ppa_addr *, int, int);
typedef int (nvm_submit_io_fn)(struct nvm_dev *, struct nvm_rq *);
typedef int (nvm_submit_io_sync_fn)(struct nvm_dev *, struct nvm_rq *);
typedef void *(nvm_create_dma_pool_fn)(struct nvm_dev *, char *);
typedef void (nvm_destroy_dma_pool_fn)(void *);
typedef void *(nvm_dev_dma_alloc_fn)(struct nvm_dev *, void *, gfp_t,
								dma_addr_t *);
typedef void (nvm_dev_dma_free_fn)(void *, void*, dma_addr_t);

/*
 * Operations a lightnvm-capable device driver provides to the core.
 */
struct nvm_dev_ops {
	nvm_id_fn		*identity;	/* read device identity/geometry */

	nvm_op_bb_tbl_fn	*get_bb_tbl;	/* read bad-block table */
	nvm_op_set_bb_fn	*set_bb_tbl;	/* update bad-block table entries */

	nvm_submit_io_fn	*submit_io;	/* asynchronous submission */
	nvm_submit_io_sync_fn	*submit_io_sync; /* synchronous submission */

	/* DMA pool management for ppa/metadata lists */
	nvm_create_dma_pool_fn	*create_dma_pool;
	nvm_destroy_dma_pool_fn	*destroy_dma_pool;
	nvm_dev_dma_alloc_fn	*dev_dma_alloc;
	nvm_dev_dma_free_fn	*dev_dma_free;
};
77 | ||
cd9e9808 MB |
78 | #ifdef CONFIG_NVM |
79 | ||
80 | #include <linux/blkdev.h> | |
cd9e9808 MB |
81 | #include <linux/file.h> |
82 | #include <linux/dmapool.h> | |
e3eb3799 | 83 | #include <uapi/linux/lightnvm.h> |
cd9e9808 MB |
84 | |
/* Device- and media-level constants for the 1.2 open-channel interface. */
enum {
	/* HW Responsibilities */
	NVM_RSP_L2P	= 1 << 0,
	NVM_RSP_ECC	= 1 << 1,

	/* Physical Addressing Mode */
	NVM_ADDRMODE_LINEAR	= 0,
	NVM_ADDRMODE_CHANNEL	= 1,

	/* Plane programming mode for LUN */
	NVM_PLANE_SINGLE	= 1,
	NVM_PLANE_DOUBLE	= 2,
	NVM_PLANE_QUAD		= 4,

	/* Status codes */
	NVM_RSP_SUCCESS		= 0x0,
	NVM_RSP_NOT_CHANGEABLE	= 0x1,
	NVM_RSP_ERR_FAILWRITE	= 0x40ff,
	NVM_RSP_ERR_EMPTYPAGE	= 0x42ff,
	NVM_RSP_ERR_FAILECC	= 0x4281,
	NVM_RSP_ERR_FAILCRC	= 0x4004,
	NVM_RSP_WARN_HIGHECC	= 0x4700,

	/* Device opcodes */
	NVM_OP_PWRITE		= 0x91,
	NVM_OP_PREAD		= 0x92,
	NVM_OP_ERASE		= 0x90,

	/* PPA Command Flags */
	NVM_IO_SNGL_ACCESS	= 0x0,
	NVM_IO_DUAL_ACCESS	= 0x1,
	NVM_IO_QUAD_ACCESS	= 0x2,

	/* NAND Access Modes */
	NVM_IO_SUSPEND		= 0x80,
	NVM_IO_SLC_MODE		= 0x100,
	NVM_IO_SCRAMBLE_ENABLE	= 0x200,

	/* Block Types */
	NVM_BLK_T_FREE		= 0x0,
	NVM_BLK_T_BAD		= 0x1,
	NVM_BLK_T_GRWN_BAD	= 0x2,	/* grown bad: went bad during use */
	NVM_BLK_T_DEV		= 0x4,
	NVM_BLK_T_HOST		= 0x8,

	/* Memory capabilities */
	NVM_ID_CAP_SLC		= 0x1,
	NVM_ID_CAP_CMD_SUSPEND	= 0x2,
	NVM_ID_CAP_SCRAMBLE	= 0x4,
	NVM_ID_CAP_ENCRYPT	= 0x8,

	/* Memory types */
	NVM_ID_FMTYPE_SLC	= 0,
	NVM_ID_FMTYPE_MLC	= 1,

	/* Device capabilities */
	NVM_ID_DCAP_BBLKMGMT	= 0x1,
	/*
	 * NOTE(review): "NVM_UD_" looks like a typo for "NVM_ID_"; the name
	 * is kept as-is since external users may reference it.
	 */
	NVM_UD_DCAP_ECC		= 0x2,
};
144 | ||
/*
 * 1.2 lower-page table payload — presumably MLC page-pairing info
 * reported by the device (TODO confirm against the 1.2 spec).
 */
struct nvm_id_lp_mlc {
	u16 num_pairs;	/* number of valid entries in pairs[] */
	u8 pairs[886];
};
149 | ||
/* 1.2 lower-page table: identifier bytes followed by the MLC pairing data. */
struct nvm_id_lp_tbl {
	__u8 id[8];
	struct nvm_id_lp_mlc mlc;
};
154 | ||
/*
 * 1.2-style physical address format: for each PPA field (channel, lun,
 * block, page, plane, sector) the device reports a bit length, and the
 * core derives the bit offset and extraction mask used by the address
 * conversion helpers below.
 */
struct nvm_addrf_12 {
	/* field bit lengths */
	u8 ch_len;
	u8 lun_len;
	u8 blk_len;
	u8 pg_len;
	u8 pln_len;
	u8 sect_len;

	/* field bit offsets within the 64-bit ppa */
	u8 ch_offset;
	u8 lun_offset;
	u8 blk_offset;
	u8 pg_offset;
	u8 pln_offset;
	u8 sect_offset;

	/* in-place extraction masks */
	u64 ch_mask;
	u64 lun_mask;
	u64 blk_mask;
	u64 pg_mask;
	u64 pln_mask;
	u64 sec_mask;
};
62771fe0 | 177 | |
/*
 * Generic address format (2.0 style): channel, lun, chunk and sector
 * fields plus reserved space; lengths, offsets and masks as in the 1.2
 * variant above.
 */
struct nvm_addrf {
	/* field bit lengths */
	u8 ch_len;
	u8 lun_len;
	u8 chk_len;
	u8 sec_len;
	u8 rsv_len[2];

	/* field bit offsets within the 64-bit address */
	u8 ch_offset;
	u8 lun_offset;
	u8 chk_offset;
	u8 sec_offset;
	u8 rsv_off[2];

	/* in-place extraction masks */
	u64 ch_mask;
	u64 lun_mask;
	u64 chk_mask;
	u64 sec_mask;
	u64 rsv_mask[2];
};
cd9e9808 MB |
197 | |
/* An instantiated target bound to a sub-device and exposed as a disk. */
struct nvm_target {
	struct list_head list;		/* entry in the device's target list */
	struct nvm_tgt_dev *dev;	/* the target's view of the device */
	struct nvm_tgt_type *type;	/* target implementation */
	struct gendisk *disk;		/* block device exposed to userspace */
};
204 | ||
cd9e9808 MB |
205 | #define ADDR_EMPTY (~0ULL) |
206 | ||
e5392739 JG |
207 | #define NVM_TARGET_DEFAULT_OP (101) |
208 | #define NVM_TARGET_MIN_OP (3) | |
209 | #define NVM_TARGET_MAX_OP (80) | |
210 | ||
cd9e9808 MB |
211 | #define NVM_VERSION_MAJOR 1 |
212 | #define NVM_VERSION_MINOR 0 | |
213 | #define NVM_VERSION_PATCH 0 | |
214 | ||
89a09c56 MB |
215 | #define NVM_MAX_VLBA (64) /* max logical blocks in a vector command */ |
216 | ||
struct nvm_rq;
typedef void (nvm_end_io_fn)(struct nvm_rq *);

/*
 * A lightnvm I/O request. Carries either a single ppa (ppa_addr) or,
 * for vector commands, a ppa list plus the DMA handle for that list
 * (the union reflects that only one of the two is meaningful at once).
 */
struct nvm_rq {
	struct nvm_tgt_dev *dev;	/* submitting target device */

	struct bio *bio;		/* backing block-layer bio */

	union {
		struct ppa_addr ppa_addr;	/* single-address command */
		dma_addr_t dma_ppa_list;	/* DMA handle for ppa_list */
	};

	struct ppa_addr *ppa_list;	/* addresses for vector commands */

	void *meta_list;		/* out-of-band metadata buffer */
	dma_addr_t dma_meta_list;	/* DMA handle for meta_list */

	nvm_end_io_fn *end_io;		/* completion callback */

	uint8_t opcode;			/* NVM_OP_* */
	uint16_t nr_ppas;		/* number of addresses in the command */
	uint16_t flags;

	u64 ppa_status; /* ppa media status */
	int error;			/* completion error code */

	void *private;			/* owner-private context */
};
246 | ||
247 | static inline struct nvm_rq *nvm_rq_from_pdu(void *pdu) | |
248 | { | |
249 | return pdu - sizeof(struct nvm_rq); | |
250 | } | |
251 | ||
252 | static inline void *nvm_rq_to_pdu(struct nvm_rq *rqdata) | |
253 | { | |
254 | return rqdata + 1; | |
255 | } | |
256 | ||
/*
 * Per-block state flags (bitmask values; note 0x4 is unused here —
 * presumably reserved, TODO confirm against users of these flags).
 */
enum {
	NVM_BLK_ST_FREE =	0x1,	/* Free block */
	NVM_BLK_ST_TGT =	0x2,	/* Block in use by target */
	NVM_BLK_ST_BAD =	0x8,	/* Bad block */
};
262 | ||
/* Instance geometry: per-device layout, limits and timings. */
struct nvm_geo {
	/* device reported version */
	u8 ver_id;

	/* instance specific geometry */
	int nr_chnls;		/* number of channels */
	int nr_luns;		/* per channel */

	/* calculated values */
	int all_luns;		/* across channels */
	int all_chunks;		/* across channels */

	int op;			/* over-provision in instance */

	sector_t total_secs;	/* across channels */

	/* chunk geometry */
	u32 nr_chks;		/* chunks per lun */
	u32 clba;		/* sectors per chunk */
	u16 csecs;		/* sector size */
	u16 sos;		/* out-of-band area size */

	/* device write constraints */
	u32 ws_min;		/* minimum write size */
	u32 ws_opt;		/* optimal write size */
	u32 mw_cunits;		/* distance required for successful read */

	/* device capabilities */
	u32 mccap;

	/* device timings */
	u32 trdt;		/* Avg. Tread (ns) */
	u32 trdm;		/* Max Tread (ns) */
	u32 tprt;		/* Avg. Tprog (ns) */
	u32 tprm;		/* Max Tprog (ns) */
	u32 tbet;		/* Avg. Terase (ns) */
	u32 tbem;		/* Max Terase (ns) */

	/* generic address format */
	struct nvm_addrf addrf;

	/* 1.2 compatibility */
	u8 vmnt;
	u32 cap;
	u32 dom;

	u8 mtype;
	u8 fmtype;

	u16 cpar;
	u32 mpos;

	u8 num_pln;
	u8 plane_mode;
	u16 num_pg;
	u16 fpg_sz;
};
321 | ||
/* sub-device structure: a target's window onto the parent device */
struct nvm_tgt_dev {
	/* Device information */
	struct nvm_geo geo;

	/* Base ppas for target LUNs */
	struct ppa_addr *luns;

	struct request_queue *q;	/* request queue for this target */

	struct nvm_dev *parent;		/* underlying physical device */
	void *map;	/* opaque mapping state owned by the core */
};
335 | ||
/* A registered lightnvm device and its target bookkeeping. */
struct nvm_dev {
	struct nvm_dev_ops *ops;	/* driver-provided operations */

	struct list_head devices;	/* entry in the global device list */

	/* Device information */
	struct nvm_geo geo;

	unsigned long *lun_map;		/* presumably a bitmap of LUNs in
					 * use by targets — TODO confirm */
	void *dma_pool;			/* pool from ops->create_dma_pool */

	/* Backend device */
	struct request_queue *q;
	char name[DISK_NAME_LEN];
	void *private_data;		/* driver-private context */

	void *rmap;	/* opaque reverse-mapping state owned by the core */

	struct mutex mlock;
	spinlock_t lock;

	/* target management */
	struct list_head area_list;
	struct list_head targets;
};
361 | ||
dab8ee9e MB |
362 | static inline struct ppa_addr generic_to_dev_addr(struct nvm_tgt_dev *tgt_dev, |
363 | struct ppa_addr r) | |
cd9e9808 | 364 | { |
dab8ee9e | 365 | struct nvm_geo *geo = &tgt_dev->geo; |
e46f4e48 | 366 | struct nvm_addrf_12 *ppaf = (struct nvm_addrf_12 *)&geo->addrf; |
cd9e9808 MB |
367 | struct ppa_addr l; |
368 | ||
e46f4e48 JG |
369 | l.ppa = ((u64)r.g.ch) << ppaf->ch_offset; |
370 | l.ppa |= ((u64)r.g.lun) << ppaf->lun_offset; | |
371 | l.ppa |= ((u64)r.g.blk) << ppaf->blk_offset; | |
372 | l.ppa |= ((u64)r.g.pg) << ppaf->pg_offset; | |
373 | l.ppa |= ((u64)r.g.pl) << ppaf->pln_offset; | |
374 | l.ppa |= ((u64)r.g.sec) << ppaf->sect_offset; | |
cd9e9808 MB |
375 | |
376 | return l; | |
377 | } | |
378 | ||
dab8ee9e MB |
379 | static inline struct ppa_addr dev_to_generic_addr(struct nvm_tgt_dev *tgt_dev, |
380 | struct ppa_addr r) | |
cd9e9808 | 381 | { |
dab8ee9e | 382 | struct nvm_geo *geo = &tgt_dev->geo; |
e46f4e48 | 383 | struct nvm_addrf_12 *ppaf = (struct nvm_addrf_12 *)&geo->addrf; |
cd9e9808 MB |
384 | struct ppa_addr l; |
385 | ||
5389a1df | 386 | l.ppa = 0; |
e46f4e48 JG |
387 | |
388 | l.g.ch = (r.ppa & ppaf->ch_mask) >> ppaf->ch_offset; | |
389 | l.g.lun = (r.ppa & ppaf->lun_mask) >> ppaf->lun_offset; | |
390 | l.g.blk = (r.ppa & ppaf->blk_mask) >> ppaf->blk_offset; | |
391 | l.g.pg = (r.ppa & ppaf->pg_mask) >> ppaf->pg_offset; | |
392 | l.g.pl = (r.ppa & ppaf->pln_mask) >> ppaf->pln_offset; | |
393 | l.g.sec = (r.ppa & ppaf->sec_mask) >> ppaf->sect_offset; | |
cd9e9808 MB |
394 | |
395 | return l; | |
396 | } | |
397 | ||
/* Function types a target implementation provides to the core. */
typedef blk_qc_t (nvm_tgt_make_rq_fn)(struct request_queue *, struct bio *);
typedef sector_t (nvm_tgt_capacity_fn)(void *);
typedef void *(nvm_tgt_init_fn)(struct nvm_tgt_dev *, struct gendisk *,
				int flags);
typedef void (nvm_tgt_exit_fn)(void *);
typedef int (nvm_tgt_sysfs_init_fn)(struct gendisk *);
typedef void (nvm_tgt_sysfs_exit_fn)(struct gendisk *);

/*
 * A target implementation: I/O entry points, lifetime hooks and sysfs
 * setup for one kind of lightnvm target.
 */
struct nvm_tgt_type {
	const char *name;
	unsigned int version[3];

	/* target entry points */
	nvm_tgt_make_rq_fn *make_rq;	/* handle an incoming bio */
	nvm_tgt_capacity_fn *capacity;	/* report usable capacity */

	/* module-specific init/teardown */
	nvm_tgt_init_fn *init;
	nvm_tgt_exit_fn *exit;

	/* sysfs */
	nvm_tgt_sysfs_init_fn *sysfs_init;
	nvm_tgt_sysfs_exit_fn *sysfs_exit;

	/* For internal use */
	struct list_head list;		/* entry in registered-type list */
	struct module *owner;
};
426 | ||
6063fe39 SL |
427 | extern int nvm_register_tgt_type(struct nvm_tgt_type *); |
428 | extern void nvm_unregister_tgt_type(struct nvm_tgt_type *); | |
cd9e9808 MB |
429 | |
430 | extern void *nvm_dev_dma_alloc(struct nvm_dev *, gfp_t, dma_addr_t *); | |
431 | extern void nvm_dev_dma_free(struct nvm_dev *, void *, dma_addr_t); | |
432 | ||
b0b4e09c MB |
433 | extern struct nvm_dev *nvm_alloc_dev(int); |
434 | extern int nvm_register(struct nvm_dev *); | |
435 | extern void nvm_unregister(struct nvm_dev *); | |
cd9e9808 | 436 | |
333ba053 JG |
437 | extern int nvm_set_tgt_bb_tbl(struct nvm_tgt_dev *, struct ppa_addr *, |
438 | int, int); | |
8e53624d | 439 | extern int nvm_submit_io(struct nvm_tgt_dev *, struct nvm_rq *); |
1a94b2d4 | 440 | extern int nvm_submit_io_sync(struct nvm_tgt_dev *, struct nvm_rq *); |
06894efe | 441 | extern void nvm_end_io(struct nvm_rq *); |
22e8c976 | 442 | extern int nvm_bb_tbl_fold(struct nvm_dev *, u8 *, int); |
333ba053 | 443 | extern int nvm_get_tgt_bb_tbl(struct nvm_tgt_dev *, struct ppa_addr, u8 *); |
e3eb3799 | 444 | |
cd9e9808 MB |
445 | #else /* CONFIG_NVM */ |
446 | struct nvm_dev_ops; | |
447 | ||
/* Stubs for kernels built without CONFIG_NVM: registration always fails. */
static inline struct nvm_dev *nvm_alloc_dev(int node)
{
	return ERR_PTR(-EINVAL);
}
static inline int nvm_register(struct nvm_dev *dev)
{
	return -EINVAL;
}
static inline void nvm_unregister(struct nvm_dev *dev) {}
cd9e9808 MB |
457 | #endif /* CONFIG_NVM */ |
#endif /* NVM_H */