drivers/lightnvm/core.c
/*
 * Copyright (C) 2015 IT University of Copenhagen. All rights reserved.
 * Initial release: Matias Bjorling <m@bjorling.me>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; see the file COPYING.  If not, write to
 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139,
 * USA.
 *
 */

#include <linux/list.h>
#include <linux/types.h>
#include <linux/sem.h>
#include <linux/bitmap.h>
#include <linux/moduleparam.h>
#include <linux/miscdevice.h>
#include <linux/lightnvm.h>
#include <linux/sched/sysctl.h>

static LIST_HEAD(nvm_tgt_types);
static DECLARE_RWSEM(nvm_tgtt_lock);
static LIST_HEAD(nvm_mgrs);
static LIST_HEAD(nvm_devices);
static DECLARE_RWSEM(nvm_lock);

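/*
 * Look up a registered target type by name. When @lock is set the lookup
 * takes nvm_tgtt_lock; callers that already hold the lock pass 0.
 */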
struct nvm_tgt_type *nvm_find_target_type(const char *name, int lock)
{
        struct nvm_tgt_type *tmp, *tt = NULL;

        if (lock)
                down_write(&nvm_tgtt_lock);

        list_for_each_entry(tmp, &nvm_tgt_types, list)
                if (!strcmp(name, tmp->name)) {
                        tt = tmp;
                        break;
                }

        if (lock)
                up_write(&nvm_tgtt_lock);
        return tt;
}
EXPORT_SYMBOL(nvm_find_target_type);

int nvm_register_tgt_type(struct nvm_tgt_type *tt)
{
        int ret = 0;

        down_write(&nvm_tgtt_lock);
        if (nvm_find_target_type(tt->name, 0))
                ret = -EEXIST;
        else
                list_add(&tt->list, &nvm_tgt_types);
        up_write(&nvm_tgtt_lock);

        return ret;
}
EXPORT_SYMBOL(nvm_register_tgt_type);

void nvm_unregister_tgt_type(struct nvm_tgt_type *tt)
{
        if (!tt)
                return;

        down_write(&nvm_tgtt_lock);
        list_del(&tt->list);
        up_write(&nvm_tgtt_lock);
}
EXPORT_SYMBOL(nvm_unregister_tgt_type);

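/*
 * DMA helpers: wrap the device's dma_pool so that users can allocate and
 * free per-request PPA lists without knowing the underlying device ops.
 */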
void *nvm_dev_dma_alloc(struct nvm_dev *dev, gfp_t mem_flags,
                                                        dma_addr_t *dma_handler)
{
        return dev->ops->dev_dma_alloc(dev, dev->dma_pool, mem_flags,
                                                                dma_handler);
}
EXPORT_SYMBOL(nvm_dev_dma_alloc);

void nvm_dev_dma_free(struct nvm_dev *dev, void *addr, dma_addr_t dma_handler)
{
        dev->ops->dev_dma_free(dev->dma_pool, addr, dma_handler);
}
EXPORT_SYMBOL(nvm_dev_dma_free);

static struct nvmm_type *nvm_find_mgr_type(const char *name)
{
        struct nvmm_type *mt;

        list_for_each_entry(mt, &nvm_mgrs, list)
                if (!strcmp(name, mt->name))
                        return mt;

        return NULL;
}

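/*
 * Try to attach a media manager to the device. The manager type stored in
 * the system block (dev->sb.mmtype) must match a registered manager, and the
 * manager's register_mgr() callback decides whether it claims the device.
 * Must be called with nvm_lock held.
 */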
static struct nvmm_type *nvm_init_mgr(struct nvm_dev *dev)
{
        struct nvmm_type *mt;
        int ret;

        lockdep_assert_held(&nvm_lock);

        list_for_each_entry(mt, &nvm_mgrs, list) {
                if (strncmp(dev->sb.mmtype, mt->name, NVM_MMTYPE_LEN))
                        continue;

                ret = mt->register_mgr(dev);
                if (ret < 0) {
                        pr_err("nvm: media mgr failed to init (%d) on dev %s\n",
                                                                ret, dev->name);
                        return NULL; /* initialization failed */
                } else if (ret > 0)
                        return mt;
        }

        return NULL;
}

int nvm_register_mgr(struct nvmm_type *mt)
{
        struct nvm_dev *dev;
        int ret = 0;

        down_write(&nvm_lock);
        if (nvm_find_mgr_type(mt->name)) {
                ret = -EEXIST;
                goto finish;
        } else {
                list_add(&mt->list, &nvm_mgrs);
        }

        /* try to register a media mgr on any device that has none configured */
        list_for_each_entry(dev, &nvm_devices, devices) {
                if (dev->mt)
                        continue;

                dev->mt = nvm_init_mgr(dev);
        }
finish:
        up_write(&nvm_lock);

        return ret;
}
EXPORT_SYMBOL(nvm_register_mgr);

void nvm_unregister_mgr(struct nvmm_type *mt)
{
        if (!mt)
                return;

        down_write(&nvm_lock);
        list_del(&mt->list);
        up_write(&nvm_lock);
}
EXPORT_SYMBOL(nvm_unregister_mgr);

static struct nvm_dev *nvm_find_nvm_dev(const char *name)
{
        struct nvm_dev *dev;

        list_for_each_entry(dev, &nvm_devices, devices)
                if (!strcmp(name, dev->name))
                        return dev;

        return NULL;
}

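/*
 * Translate the PPAs of a request from target address space to device
 * address format before the request is handed to the device driver.
 */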
static void nvm_tgt_generic_to_addr_mode(struct nvm_tgt_dev *tgt_dev,
                                         struct nvm_rq *rqd)
{
        struct nvm_dev *dev = tgt_dev->parent;
        int i;

        if (rqd->nr_ppas > 1) {
                for (i = 0; i < rqd->nr_ppas; i++) {
                        rqd->ppa_list[i] = dev->mt->trans_ppa(tgt_dev,
                                        rqd->ppa_list[i], TRANS_TGT_TO_DEV);
                        rqd->ppa_list[i] = generic_to_dev_addr(dev,
                                                        rqd->ppa_list[i]);
                }
        } else {
                rqd->ppa_addr = dev->mt->trans_ppa(tgt_dev, rqd->ppa_addr,
                                                TRANS_TGT_TO_DEV);
                rqd->ppa_addr = generic_to_dev_addr(dev, rqd->ppa_addr);
        }
}

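/*
 * Mark a set of blocks with the given block type in the device's bad block
 * table. The PPA list is expanded to cover all planes before the update is
 * issued.
 */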
int nvm_set_bb_tbl(struct nvm_dev *dev, struct ppa_addr *ppas, int nr_ppas,
                                                                int type)
{
        struct nvm_rq rqd;
        int ret;

        if (nr_ppas > dev->ops->max_phys_sect) {
                pr_err("nvm: unable to update all sysblocks atomically\n");
                return -EINVAL;
        }

        memset(&rqd, 0, sizeof(struct nvm_rq));

        nvm_set_rqd_ppalist(dev, &rqd, ppas, nr_ppas, 1);
        nvm_generic_to_addr_mode(dev, &rqd);

        ret = dev->ops->set_bb_tbl(dev, &rqd.ppa_addr, rqd.nr_ppas, type);
        nvm_free_rqd_ppalist(dev, &rqd);
        if (ret) {
                pr_err("nvm: sysblk failed bb mark\n");
                return -EINVAL;
        }

        return 0;
}
EXPORT_SYMBOL(nvm_set_bb_tbl);

int nvm_set_tgt_bb_tbl(struct nvm_tgt_dev *tgt_dev, struct ppa_addr *ppas,
                       int nr_ppas, int type)
{
        struct nvm_dev *dev = tgt_dev->parent;
        struct nvm_rq rqd;
        int ret;

        if (nr_ppas > dev->ops->max_phys_sect) {
                pr_err("nvm: unable to update all blocks atomically\n");
                return -EINVAL;
        }

        memset(&rqd, 0, sizeof(struct nvm_rq));

        nvm_set_rqd_ppalist(dev, &rqd, ppas, nr_ppas, 1);
        nvm_tgt_generic_to_addr_mode(tgt_dev, &rqd);

        ret = dev->ops->set_bb_tbl(dev, &rqd.ppa_addr, rqd.nr_ppas, type);
        nvm_free_rqd_ppalist(dev, &rqd);
        if (ret) {
                pr_err("nvm: sysblk failed bb mark\n");
                return -EINVAL;
        }

        return 0;
}
EXPORT_SYMBOL(nvm_set_tgt_bb_tbl);

int nvm_max_phys_sects(struct nvm_tgt_dev *tgt_dev)
{
        struct nvm_dev *dev = tgt_dev->parent;

        return dev->ops->max_phys_sect;
}
EXPORT_SYMBOL(nvm_max_phys_sects);

int nvm_submit_io(struct nvm_tgt_dev *tgt_dev, struct nvm_rq *rqd)
{
        struct nvm_dev *dev = tgt_dev->parent;

        return dev->mt->submit_io(tgt_dev, rqd);
}
EXPORT_SYMBOL(nvm_submit_io);

int nvm_erase_blk(struct nvm_tgt_dev *tgt_dev, struct ppa_addr *p, int flags)
{
        struct nvm_dev *dev = tgt_dev->parent;

        return dev->mt->erase_blk(tgt_dev, p, flags);
}
EXPORT_SYMBOL(nvm_erase_blk);

int nvm_get_l2p_tbl(struct nvm_tgt_dev *tgt_dev, u64 slba, u32 nlb,
                    nvm_l2p_update_fn *update_l2p, void *priv)
{
        struct nvm_dev *dev = tgt_dev->parent;

        if (!dev->ops->get_l2p_tbl)
                return 0;

        return dev->ops->get_l2p_tbl(dev, slba, nlb, update_l2p, priv);
}
EXPORT_SYMBOL(nvm_get_l2p_tbl);

int nvm_get_area(struct nvm_tgt_dev *tgt_dev, sector_t *lba, sector_t len)
{
        struct nvm_dev *dev = tgt_dev->parent;

        return dev->mt->get_area(dev, lba, len);
}
EXPORT_SYMBOL(nvm_get_area);

void nvm_put_area(struct nvm_tgt_dev *tgt_dev, sector_t lba)
{
        struct nvm_dev *dev = tgt_dev->parent;

        dev->mt->put_area(dev, lba);
}
EXPORT_SYMBOL(nvm_put_area);

void nvm_addr_to_generic_mode(struct nvm_dev *dev, struct nvm_rq *rqd)
{
        int i;

        if (rqd->nr_ppas > 1) {
                for (i = 0; i < rqd->nr_ppas; i++)
                        rqd->ppa_list[i] = dev_to_generic_addr(dev,
                                                        rqd->ppa_list[i]);
        } else {
                rqd->ppa_addr = dev_to_generic_addr(dev, rqd->ppa_addr);
        }
}
EXPORT_SYMBOL(nvm_addr_to_generic_mode);

void nvm_generic_to_addr_mode(struct nvm_dev *dev, struct nvm_rq *rqd)
{
        int i;

        if (rqd->nr_ppas > 1) {
                for (i = 0; i < rqd->nr_ppas; i++)
                        rqd->ppa_list[i] = generic_to_dev_addr(dev,
                                                        rqd->ppa_list[i]);
        } else {
                rqd->ppa_addr = generic_to_dev_addr(dev, rqd->ppa_addr);
        }
}
EXPORT_SYMBOL(nvm_generic_to_addr_mode);

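/*
 * Build the PPA list of a request. A single PPA that needs no plane
 * unfolding is stored inline; otherwise a DMA-able list is allocated. With
 * @vblk set, each PPA is expanded into one entry per plane.
 */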
int nvm_set_rqd_ppalist(struct nvm_dev *dev, struct nvm_rq *rqd,
                        const struct ppa_addr *ppas, int nr_ppas, int vblk)
{
        struct nvm_geo *geo = &dev->geo;
        int i, plane_cnt, pl_idx;
        struct ppa_addr ppa;

        if ((!vblk || geo->plane_mode == NVM_PLANE_SINGLE) && nr_ppas == 1) {
                rqd->nr_ppas = nr_ppas;
                rqd->ppa_addr = ppas[0];

                return 0;
        }

        rqd->nr_ppas = nr_ppas;
        rqd->ppa_list = nvm_dev_dma_alloc(dev, GFP_KERNEL, &rqd->dma_ppa_list);
        if (!rqd->ppa_list) {
                pr_err("nvm: failed to allocate dma memory\n");
                return -ENOMEM;
        }

        if (!vblk) {
                for (i = 0; i < nr_ppas; i++)
                        rqd->ppa_list[i] = ppas[i];
        } else {
                plane_cnt = geo->plane_mode;
                rqd->nr_ppas *= plane_cnt;

                for (i = 0; i < nr_ppas; i++) {
                        for (pl_idx = 0; pl_idx < plane_cnt; pl_idx++) {
                                ppa = ppas[i];
                                ppa.g.pl = pl_idx;
                                rqd->ppa_list[(pl_idx * nr_ppas) + i] = ppa;
                        }
                }
        }

        return 0;
}
EXPORT_SYMBOL(nvm_set_rqd_ppalist);

void nvm_free_rqd_ppalist(struct nvm_dev *dev, struct nvm_rq *rqd)
{
        if (!rqd->ppa_list)
                return;

        nvm_dev_dma_free(dev, rqd->ppa_list, rqd->dma_ppa_list);
}
EXPORT_SYMBOL(nvm_free_rqd_ppalist);

int nvm_erase_ppa(struct nvm_dev *dev, struct ppa_addr *ppas, int nr_ppas,
                                                                int flags)
{
        struct nvm_rq rqd;
        int ret;

        if (!dev->ops->erase_block)
                return 0;

        memset(&rqd, 0, sizeof(struct nvm_rq));

        ret = nvm_set_rqd_ppalist(dev, &rqd, ppas, nr_ppas, 1);
        if (ret)
                return ret;

        nvm_generic_to_addr_mode(dev, &rqd);

        rqd.flags = flags;

        ret = dev->ops->erase_block(dev, &rqd);

        nvm_free_rqd_ppalist(dev, &rqd);

        return ret;
}
EXPORT_SYMBOL(nvm_erase_ppa);

void nvm_end_io(struct nvm_rq *rqd, int error)
{
        rqd->error = error;
        rqd->end_io(rqd);
}
EXPORT_SYMBOL(nvm_end_io);

static void nvm_end_io_sync(struct nvm_rq *rqd)
{
        struct completion *waiting = rqd->wait;

        rqd->wait = NULL;

        complete(waiting);
}

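/*
 * Submit a request synchronously: map the data buffer into a bio, issue the
 * command and wait for completion, polling in intervals so that the hung
 * task detector is not triggered by long-running I/O.
 */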
static int __nvm_submit_ppa(struct nvm_dev *dev, struct nvm_rq *rqd, int opcode,
                                                int flags, void *buf, int len)
{
        DECLARE_COMPLETION_ONSTACK(wait);
        struct bio *bio;
        int ret;
        unsigned long hang_check;

        bio = bio_map_kern(dev->q, buf, len, GFP_KERNEL);
        if (IS_ERR_OR_NULL(bio))
                return -ENOMEM;

        nvm_generic_to_addr_mode(dev, rqd);

        rqd->dev = NULL;
        rqd->opcode = opcode;
        rqd->flags = flags;
        rqd->bio = bio;
        rqd->wait = &wait;
        rqd->end_io = nvm_end_io_sync;

        ret = dev->ops->submit_io(dev, rqd);
        if (ret) {
                bio_put(bio);
                return ret;
        }

        /* Prevent hang_check timer from firing at us during very long I/O */
        hang_check = sysctl_hung_task_timeout_secs;
        if (hang_check)
                while (!wait_for_completion_io_timeout(&wait,
                                                        hang_check * (HZ/2)))
                        ;
        else
                wait_for_completion_io(&wait);

        return rqd->error;
}

/**
 * nvm_submit_ppa_list - submit a user-defined ppa list to the device. The
 *                       caller is responsible for freeing the ppa list if
 *                       necessary.
 * @dev:        device
 * @ppa_list:   user created ppa_list
 * @nr_ppas:    length of ppa_list
 * @opcode:     device opcode
 * @flags:      device flags
 * @buf:        data buffer
 * @len:        data buffer length
 */
int nvm_submit_ppa_list(struct nvm_dev *dev, struct ppa_addr *ppa_list,
                        int nr_ppas, int opcode, int flags, void *buf, int len)
{
        struct nvm_rq rqd;

        if (dev->ops->max_phys_sect < nr_ppas)
                return -EINVAL;

        memset(&rqd, 0, sizeof(struct nvm_rq));

        rqd.nr_ppas = nr_ppas;
        if (nr_ppas > 1)
                rqd.ppa_list = ppa_list;
        else
                rqd.ppa_addr = ppa_list[0];

        return __nvm_submit_ppa(dev, &rqd, opcode, flags, buf, len);
}
EXPORT_SYMBOL(nvm_submit_ppa_list);
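
/*
 * Illustrative (hypothetical) caller of nvm_submit_ppa_list(); the actual
 * opcode, flags and buffer size depend on the device and command:
 *
 *      ret = nvm_submit_ppa_list(dev, ppas, nr_ppas, NVM_OP_PREAD, 0,
 *                                buf, nr_ppas * dev->geo.sec_size);
 *      if (ret)
 *              pr_err("nvm: ppa list read failed (%d)\n", ret);
 */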

/**
 * nvm_submit_ppa - submit PPAs to the device. The PPAs are automatically
 *                  unfolded into single, dual or quad plane PPAs depending
 *                  on the device's plane mode.
 * @dev:        device
 * @ppa:        user created ppa list
 * @nr_ppas:    length of ppa list
 * @opcode:     device opcode
 * @flags:      device flags
 * @buf:        data buffer
 * @len:        data buffer length
 */
int nvm_submit_ppa(struct nvm_dev *dev, struct ppa_addr *ppa, int nr_ppas,
                                int opcode, int flags, void *buf, int len)
{
        struct nvm_rq rqd;
        int ret;

        memset(&rqd, 0, sizeof(struct nvm_rq));
        ret = nvm_set_rqd_ppalist(dev, &rqd, ppa, nr_ppas, 1);
        if (ret)
                return ret;

        ret = __nvm_submit_ppa(dev, &rqd, opcode, flags, buf, len);

        nvm_free_rqd_ppalist(dev, &rqd);

        return ret;
}
EXPORT_SYMBOL(nvm_submit_ppa);

/*
 * Folds a bad block list from its plane representation to its virtual
 * block representation. The fold is done in place and the reduced size is
 * returned.
 *
 * If any plane's status is bad or grown bad, the virtual block is marked
 * bad. If not, the state of the first plane acts as the block state.
 */
int nvm_bb_tbl_fold(struct nvm_dev *dev, u8 *blks, int nr_blks)
{
        struct nvm_geo *geo = &dev->geo;
        int blk, offset, pl, blktype;

        if (nr_blks != geo->blks_per_lun * geo->plane_mode)
                return -EINVAL;

        for (blk = 0; blk < geo->blks_per_lun; blk++) {
                offset = blk * geo->plane_mode;
                blktype = blks[offset];

                /* Bad blocks on any planes take precedence over other types */
                for (pl = 0; pl < geo->plane_mode; pl++) {
                        if (blks[offset + pl] &
                                        (NVM_BLK_T_BAD|NVM_BLK_T_GRWN_BAD)) {
                                blktype = blks[offset + pl];
                                break;
                        }
                }

                blks[blk] = blktype;
        }

        return geo->blks_per_lun;
}
EXPORT_SYMBOL(nvm_bb_tbl_fold);

int nvm_get_bb_tbl(struct nvm_dev *dev, struct ppa_addr ppa, u8 *blks)
{
        ppa = generic_to_dev_addr(dev, ppa);

        return dev->ops->get_bb_tbl(dev, ppa, blks);
}
EXPORT_SYMBOL(nvm_get_bb_tbl);

int nvm_get_tgt_bb_tbl(struct nvm_tgt_dev *tgt_dev, struct ppa_addr ppa,
                       u8 *blks)
{
        struct nvm_dev *dev = tgt_dev->parent;

        ppa = dev->mt->trans_ppa(tgt_dev, ppa, TRANS_TGT_TO_DEV);
        return nvm_get_bb_tbl(dev, ppa, blks);
}
EXPORT_SYMBOL(nvm_get_tgt_bb_tbl);

static int nvm_init_slc_tbl(struct nvm_dev *dev, struct nvm_id_group *grp)
{
        struct nvm_geo *geo = &dev->geo;
        int i;

        dev->lps_per_blk = geo->pgs_per_blk;
        dev->lptbl = kcalloc(dev->lps_per_blk, sizeof(int), GFP_KERNEL);
        if (!dev->lptbl)
                return -ENOMEM;

        /* Just a linear array */
        for (i = 0; i < dev->lps_per_blk; i++)
                dev->lptbl[i] = i;

        return 0;
}

static int nvm_init_mlc_tbl(struct nvm_dev *dev, struct nvm_id_group *grp)
{
        int i, p;
        struct nvm_id_lp_mlc *mlc = &grp->lptbl.mlc;

        if (!mlc->num_pairs)
                return 0;

        dev->lps_per_blk = mlc->num_pairs;
        dev->lptbl = kcalloc(dev->lps_per_blk, sizeof(int), GFP_KERNEL);
        if (!dev->lptbl)
                return -ENOMEM;

        /* The lower page table encoding consists of a list of bytes, where
         * each byte has a lower and an upper half. The first half-byte holds
         * the initial increment value and every following half-byte is an
         * offset added to the previously decoded value.
         */
        dev->lptbl[0] = mlc->pairs[0] & 0xF;
        for (i = 1; i < dev->lps_per_blk; i++) {
                p = mlc->pairs[i >> 1];
                if (i & 0x1) /* upper */
                        dev->lptbl[i] = dev->lptbl[i - 1] + ((p & 0xF0) >> 4);
                else /* lower */
                        dev->lptbl[i] = dev->lptbl[i - 1] + (p & 0xF);
        }

        return 0;
}

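/*
 * Derive the device geometry from the identify data (first configuration
 * group), allocate the LUN bitmap and set up the lower page table matching
 * the flash type.
 */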
static int nvm_core_init(struct nvm_dev *dev)
{
        struct nvm_id *id = &dev->identity;
        struct nvm_id_group *grp = &id->groups[0];
        struct nvm_geo *geo = &dev->geo;
        int ret;

        /* Whole device values */
        geo->nr_chnls = grp->num_ch;
        geo->luns_per_chnl = grp->num_lun;

        /* Generic device values */
        geo->pgs_per_blk = grp->num_pg;
        geo->blks_per_lun = grp->num_blk;
        geo->nr_planes = grp->num_pln;
        geo->fpg_size = grp->fpg_sz;
        geo->pfpg_size = grp->fpg_sz * grp->num_pln;
        geo->sec_size = grp->csecs;
        geo->oob_size = grp->sos;
        geo->sec_per_pg = grp->fpg_sz / grp->csecs;
        geo->mccap = grp->mccap;
        memcpy(&geo->ppaf, &id->ppaf, sizeof(struct nvm_addr_format));

        geo->plane_mode = NVM_PLANE_SINGLE;
        geo->max_rq_size = dev->ops->max_phys_sect * geo->sec_size;

        if (grp->mpos & 0x020202)
                geo->plane_mode = NVM_PLANE_DOUBLE;
        if (grp->mpos & 0x040404)
                geo->plane_mode = NVM_PLANE_QUAD;

        if (grp->mtype != 0) {
                pr_err("nvm: memory type not supported\n");
                return -EINVAL;
        }

        /* calculated values */
        geo->sec_per_pl = geo->sec_per_pg * geo->nr_planes;
        geo->sec_per_blk = geo->sec_per_pl * geo->pgs_per_blk;
        geo->sec_per_lun = geo->sec_per_blk * geo->blks_per_lun;
        geo->nr_luns = geo->luns_per_chnl * geo->nr_chnls;

        dev->total_secs = geo->nr_luns * geo->sec_per_lun;
        dev->lun_map = kcalloc(BITS_TO_LONGS(geo->nr_luns),
                                        sizeof(unsigned long), GFP_KERNEL);
        if (!dev->lun_map)
                return -ENOMEM;

        switch (grp->fmtype) {
        case NVM_ID_FMTYPE_SLC:
                if (nvm_init_slc_tbl(dev, grp)) {
                        ret = -ENOMEM;
                        goto err_fmtype;
                }
                break;
        case NVM_ID_FMTYPE_MLC:
                if (nvm_init_mlc_tbl(dev, grp)) {
                        ret = -ENOMEM;
                        goto err_fmtype;
                }
                break;
        default:
                pr_err("nvm: flash type not supported\n");
                ret = -EINVAL;
                goto err_fmtype;
        }

        mutex_init(&dev->mlock);
        spin_lock_init(&dev->lock);

        blk_queue_logical_block_size(dev->q, geo->sec_size);

        return 0;
err_fmtype:
        kfree(dev->lun_map);
        return ret;
}

static void nvm_free_mgr(struct nvm_dev *dev)
{
        if (!dev->mt)
                return;

        dev->mt->unregister_mgr(dev);
        dev->mt = NULL;
}

void nvm_free(struct nvm_dev *dev)
{
        if (!dev)
                return;

        nvm_free_mgr(dev);

        if (dev->dma_pool)
                dev->ops->destroy_dma_pool(dev->dma_pool);

        kfree(dev->lptbl);
        kfree(dev->lun_map);
        kfree(dev);
}

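/*
 * Identify the device and initialize the core structures. Only 1.x devices
 * with a single configuration group are supported.
 */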
static int nvm_init(struct nvm_dev *dev)
{
        struct nvm_geo *geo = &dev->geo;
        int ret = -EINVAL;

        if (!dev->q || !dev->ops)
                return ret;

        if (dev->ops->identity(dev, &dev->identity)) {
                pr_err("nvm: device could not be identified\n");
                goto err;
        }

        pr_debug("nvm: ver:%x nvm_vendor:%x groups:%u\n",
                        dev->identity.ver_id, dev->identity.vmnt,
                                                        dev->identity.cgrps);

        if (dev->identity.ver_id != 1) {
                pr_err("nvm: device not supported by kernel.\n");
                goto err;
        }

        if (dev->identity.cgrps != 1) {
                pr_err("nvm: only one group configuration supported.\n");
                goto err;
        }

        ret = nvm_core_init(dev);
        if (ret) {
                pr_err("nvm: could not initialize core structures.\n");
                goto err;
        }

        pr_info("nvm: registered %s [%u/%u/%u/%u/%u/%u]\n",
                        dev->name, geo->sec_per_pg, geo->nr_planes,
                        geo->pgs_per_blk, geo->blks_per_lun,
                        geo->nr_luns, geo->nr_chnls);
        return 0;
err:
        pr_err("nvm: failed to initialize nvm\n");
        return ret;
}

struct nvm_dev *nvm_alloc_dev(int node)
{
        return kzalloc_node(sizeof(struct nvm_dev), GFP_KERNEL, node);
}
EXPORT_SYMBOL(nvm_alloc_dev);

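/*
 * Register a device with the subsystem: initialize it, create the DMA pool
 * used for PPA lists, read the system block if the device keeps one, and
 * attach a media manager when an initialized system block is found.
 */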
int nvm_register(struct nvm_dev *dev)
{
        int ret;

        ret = nvm_init(dev);
        if (ret)
                goto err_init;

        if (dev->ops->max_phys_sect > 256) {
                pr_info("nvm: max sectors supported is 256.\n");
                ret = -EINVAL;
                goto err_init;
        }

        if (dev->ops->max_phys_sect > 1) {
                dev->dma_pool = dev->ops->create_dma_pool(dev, "ppalist");
                if (!dev->dma_pool) {
                        pr_err("nvm: could not create dma pool\n");
                        ret = -ENOMEM;
                        goto err_init;
                }
        }

        if (dev->identity.cap & NVM_ID_DCAP_BBLKMGMT) {
                ret = nvm_get_sysblock(dev, &dev->sb);
                if (!ret)
                        pr_err("nvm: device not initialized.\n");
                else if (ret < 0)
                        pr_err("nvm: err (%d) on device initialization\n", ret);
        }

        /* register device with a supported media manager */
        down_write(&nvm_lock);
        if (ret > 0)
                dev->mt = nvm_init_mgr(dev);
        list_add(&dev->devices, &nvm_devices);
        up_write(&nvm_lock);

        return 0;
err_init:
        kfree(dev->lun_map);
        return ret;
}
EXPORT_SYMBOL(nvm_register);

void nvm_unregister(struct nvm_dev *dev)
{
        down_write(&nvm_lock);
        list_del(&dev->devices);
        up_write(&nvm_lock);

        nvm_free(dev);
}
EXPORT_SYMBOL(nvm_unregister);

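/*
 * Validate a target create request from user space and hand it over to the
 * device's media manager, which instantiates the target.
 */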
static int __nvm_configure_create(struct nvm_ioctl_create *create)
{
        struct nvm_dev *dev;
        struct nvm_ioctl_create_simple *s;

        down_write(&nvm_lock);
        dev = nvm_find_nvm_dev(create->dev);
        up_write(&nvm_lock);

        if (!dev) {
                pr_err("nvm: device not found\n");
                return -EINVAL;
        }

        if (!dev->mt) {
                pr_info("nvm: device has no media manager registered.\n");
                return -ENODEV;
        }

        if (create->conf.type != NVM_CONFIG_TYPE_SIMPLE) {
                pr_err("nvm: config type not valid\n");
                return -EINVAL;
        }
        s = &create->conf.s;

        if (s->lun_begin > s->lun_end || s->lun_end > dev->geo.nr_luns) {
                pr_err("nvm: lun out of bound (%u:%u > %u)\n",
                        s->lun_begin, s->lun_end, dev->geo.nr_luns);
                return -EINVAL;
        }

        return dev->mt->create_tgt(dev, create);
}

static long nvm_ioctl_info(struct file *file, void __user *arg)
{
        struct nvm_ioctl_info *info;
        struct nvm_tgt_type *tt;
        int tgt_iter = 0;

        if (!capable(CAP_SYS_ADMIN))
                return -EPERM;

        info = memdup_user(arg, sizeof(struct nvm_ioctl_info));
        if (IS_ERR(info))
                return -EFAULT;

        info->version[0] = NVM_VERSION_MAJOR;
        info->version[1] = NVM_VERSION_MINOR;
        info->version[2] = NVM_VERSION_PATCH;

        down_write(&nvm_tgtt_lock);
        list_for_each_entry(tt, &nvm_tgt_types, list) {
                struct nvm_ioctl_info_tgt *tgt = &info->tgts[tgt_iter];

                tgt->version[0] = tt->version[0];
                tgt->version[1] = tt->version[1];
                tgt->version[2] = tt->version[2];
                strncpy(tgt->tgtname, tt->name, NVM_TTYPE_NAME_MAX);

                tgt_iter++;
        }

        info->tgtsize = tgt_iter;
        up_write(&nvm_tgtt_lock);

        if (copy_to_user(arg, info, sizeof(struct nvm_ioctl_info))) {
                kfree(info);
                return -EFAULT;
        }

        kfree(info);
        return 0;
}

static long nvm_ioctl_get_devices(struct file *file, void __user *arg)
{
        struct nvm_ioctl_get_devices *devices;
        struct nvm_dev *dev;
        int i = 0;

        if (!capable(CAP_SYS_ADMIN))
                return -EPERM;

        devices = kzalloc(sizeof(struct nvm_ioctl_get_devices), GFP_KERNEL);
        if (!devices)
                return -ENOMEM;

        down_write(&nvm_lock);
        list_for_each_entry(dev, &nvm_devices, devices) {
                struct nvm_ioctl_device_info *info = &devices->info[i];

                sprintf(info->devname, "%s", dev->name);
                if (dev->mt) {
                        info->bmversion[0] = dev->mt->version[0];
                        info->bmversion[1] = dev->mt->version[1];
                        info->bmversion[2] = dev->mt->version[2];
                        sprintf(info->bmname, "%s", dev->mt->name);
                } else {
                        sprintf(info->bmname, "none");
                }

                i++;
                if (i > 31) {
                        pr_err("nvm: max 31 devices can be reported.\n");
                        break;
                }
        }
        up_write(&nvm_lock);

        devices->nr_devices = i;

        if (copy_to_user(arg, devices,
                         sizeof(struct nvm_ioctl_get_devices))) {
                kfree(devices);
                return -EFAULT;
        }

        kfree(devices);
        return 0;
}

static long nvm_ioctl_dev_create(struct file *file, void __user *arg)
{
        struct nvm_ioctl_create create;

        if (!capable(CAP_SYS_ADMIN))
                return -EPERM;

        if (copy_from_user(&create, arg, sizeof(struct nvm_ioctl_create)))
                return -EFAULT;

        create.dev[DISK_NAME_LEN - 1] = '\0';
        create.tgttype[NVM_TTYPE_NAME_MAX - 1] = '\0';
        create.tgtname[DISK_NAME_LEN - 1] = '\0';

        if (create.flags != 0) {
                pr_err("nvm: no flags supported\n");
                return -EINVAL;
        }

        return __nvm_configure_create(&create);
}

static long nvm_ioctl_dev_remove(struct file *file, void __user *arg)
{
        struct nvm_ioctl_remove remove;
        struct nvm_dev *dev;
        int ret = 0;

        if (!capable(CAP_SYS_ADMIN))
                return -EPERM;

        if (copy_from_user(&remove, arg, sizeof(struct nvm_ioctl_remove)))
                return -EFAULT;

        remove.tgtname[DISK_NAME_LEN - 1] = '\0';

        if (remove.flags != 0) {
                pr_err("nvm: no flags supported\n");
                return -EINVAL;
        }

        list_for_each_entry(dev, &nvm_devices, devices) {
                ret = dev->mt->remove_tgt(dev, &remove);
                if (!ret)
                        break;
        }

        return ret;
}

static void nvm_setup_nvm_sb_info(struct nvm_sb_info *info)
{
        info->seqnr = 1;
        info->erase_cnt = 0;
        info->version = 1;
}

static long __nvm_ioctl_dev_init(struct nvm_ioctl_dev_init *init)
{
        struct nvm_dev *dev;
        struct nvm_sb_info info;
        int ret;

        down_write(&nvm_lock);
        dev = nvm_find_nvm_dev(init->dev);
        up_write(&nvm_lock);
        if (!dev) {
                pr_err("nvm: device not found\n");
                return -EINVAL;
        }

        nvm_setup_nvm_sb_info(&info);

        strncpy(info.mmtype, init->mmtype, NVM_MMTYPE_LEN);
        info.fs_ppa.ppa = -1;

        if (dev->identity.cap & NVM_ID_DCAP_BBLKMGMT) {
                ret = nvm_init_sysblock(dev, &info);
                if (ret)
                        return ret;
        }

        memcpy(&dev->sb, &info, sizeof(struct nvm_sb_info));

        down_write(&nvm_lock);
        dev->mt = nvm_init_mgr(dev);
        up_write(&nvm_lock);

        return 0;
}

static long nvm_ioctl_dev_init(struct file *file, void __user *arg)
{
        struct nvm_ioctl_dev_init init;

        if (!capable(CAP_SYS_ADMIN))
                return -EPERM;

        if (copy_from_user(&init, arg, sizeof(struct nvm_ioctl_dev_init)))
                return -EFAULT;

        if (init.flags != 0) {
                pr_err("nvm: no flags supported\n");
                return -EINVAL;
        }

        init.dev[DISK_NAME_LEN - 1] = '\0';

        return __nvm_ioctl_dev_init(&init);
}

static long nvm_ioctl_dev_factory(struct file *file, void __user *arg)
{
        struct nvm_ioctl_dev_factory fact;
        struct nvm_dev *dev;

        if (!capable(CAP_SYS_ADMIN))
                return -EPERM;

        if (copy_from_user(&fact, arg, sizeof(struct nvm_ioctl_dev_factory)))
                return -EFAULT;

        fact.dev[DISK_NAME_LEN - 1] = '\0';

        if (fact.flags & ~(NVM_FACTORY_NR_BITS - 1))
                return -EINVAL;

        down_write(&nvm_lock);
        dev = nvm_find_nvm_dev(fact.dev);
        up_write(&nvm_lock);
        if (!dev) {
                pr_err("nvm: device not found\n");
                return -EINVAL;
        }

        nvm_free_mgr(dev);

        if (dev->identity.cap & NVM_ID_DCAP_BBLKMGMT)
                return nvm_dev_factory(dev, fact.flags);

        return 0;
}

static long nvm_ctl_ioctl(struct file *file, uint cmd, unsigned long arg)
{
        void __user *argp = (void __user *)arg;

        switch (cmd) {
        case NVM_INFO:
                return nvm_ioctl_info(file, argp);
        case NVM_GET_DEVICES:
                return nvm_ioctl_get_devices(file, argp);
        case NVM_DEV_CREATE:
                return nvm_ioctl_dev_create(file, argp);
        case NVM_DEV_REMOVE:
                return nvm_ioctl_dev_remove(file, argp);
        case NVM_DEV_INIT:
                return nvm_ioctl_dev_init(file, argp);
        case NVM_DEV_FACTORY:
                return nvm_ioctl_dev_factory(file, argp);
        }
        return 0;
}

static const struct file_operations _ctl_fops = {
        .open = nonseekable_open,
        .unlocked_ioctl = nvm_ctl_ioctl,
        .owner = THIS_MODULE,
        .llseek  = noop_llseek,
};

static struct miscdevice _nvm_misc = {
        .minor          = MISC_DYNAMIC_MINOR,
        .name           = "lightnvm",
        .nodename       = "lightnvm/control",
        .fops           = &_ctl_fops,
};
builtin_misc_device(_nvm_misc);