lightnvm: pblk: add debug stat for read cache hits
linux-block.git: drivers/lightnvm/pblk-init.c
/*
 * Copyright (C) 2015 IT University of Copenhagen (rrpc.c)
 * Copyright (C) 2016 CNEX Labs
 * Initial release: Javier Gonzalez <javier@cnexlabs.com>
 *                  Matias Bjorling <matias@cnexlabs.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * Implementation of a physical block-device target for Open-channel SSDs.
 *
 * pblk-init.c - pblk's initialization.
 */

#include "pblk.h"

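/*
 * The caches below and pblk_bio_set are shared by all pblk instances;
 * pblk_lock serializes global cache creation and instance tear-down.
 */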
static struct kmem_cache *pblk_blk_ws_cache, *pblk_rec_cache, *pblk_r_rq_cache,
					*pblk_w_rq_cache, *pblk_line_meta_cache;
static DECLARE_RWSEM(pblk_lock);
struct bio_set *pblk_bio_set;

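/*
 * Route a bio to the read or write path. Reads are submitted straight to
 * the media; writes are first staged in the write buffer (cache).
 */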
static int pblk_rw_io(struct request_queue *q, struct pblk *pblk,
			  struct bio *bio)
{
	int ret;

	/* Read requests must be <= 256kb due to NVMe's 64 bit completion bitmap
	 * constraint. Writes can be of arbitrary size.
	 */
	if (bio_data_dir(bio) == READ) {
		blk_queue_split(q, &bio);
		ret = pblk_submit_read(pblk, bio);
		if (ret == NVM_IO_DONE && bio_flagged(bio, BIO_CLONED))
			bio_put(bio);

		return ret;
	}

	/* Prevent deadlock in the case of a modest LUN configuration and large
	 * user I/Os. Unless stalled, the rate limiter leaves at least 256KB
	 * available for user I/O.
	 */
	if (unlikely(pblk_get_secs(bio) >= pblk_rl_sysfs_rate_show(&pblk->rl)))
		blk_queue_split(q, &bio);

	return pblk_write_to_cache(pblk, bio, PBLK_IOTYPE_USER);
}

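/*
 * Target entry point for incoming bios. DISCARD is handled inline; a
 * discard that also carries REQ_PREFLUSH falls through to the write path
 * so that the flush is honored.
 */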
static blk_qc_t pblk_make_rq(struct request_queue *q, struct bio *bio)
{
	struct pblk *pblk = q->queuedata;

	if (bio_op(bio) == REQ_OP_DISCARD) {
		pblk_discard(pblk, bio);
		if (!(bio->bi_opf & REQ_PREFLUSH)) {
			bio_endio(bio);
			return BLK_QC_T_NONE;
		}
	}

	switch (pblk_rw_io(q, pblk, bio)) {
	case NVM_IO_ERR:
		bio_io_error(bio);
		break;
	case NVM_IO_DONE:
		bio_endio(bio);
		break;
	}

	return BLK_QC_T_NONE;
}

static void pblk_l2p_free(struct pblk *pblk)
{
	vfree(pblk->trans_map);
}

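/*
 * The L2P (logical-to-physical) table is a flat vmalloc'd array with one
 * entry per exposed sector; 32-bit entries suffice when the device address
 * format fits in fewer than 32 bits.
 */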
static int pblk_l2p_init(struct pblk *pblk)
{
	sector_t i;
	struct ppa_addr ppa;
	int entry_size = 8;

	if (pblk->ppaf_bitsize < 32)
		entry_size = 4;

	pblk->trans_map = vmalloc(entry_size * pblk->rl.nr_secs);
	if (!pblk->trans_map)
		return -ENOMEM;

	pblk_ppa_set_empty(&ppa);

	for (i = 0; i < pblk->rl.nr_secs; i++)
		pblk_trans_map_set(pblk, i, ppa);

	return 0;
}

static void pblk_rwb_free(struct pblk *pblk)
{
	if (pblk_rb_tear_down_check(&pblk->rwb))
		pr_err("pblk: write buffer error on tear down\n");

	pblk_rb_data_free(&pblk->rwb);
	vfree(pblk_rb_entries_ref(&pblk->rwb));
}

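/*
 * Size the write buffer from the pages the device keeps in flight
 * (pgs_in_buffer); pblk_rb_calculate_size() is expected to round this up
 * to a power of two, since the ring buffer works on power-of-two orders.
 */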
static int pblk_rwb_init(struct pblk *pblk)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	struct pblk_rb_entry *entries;
	unsigned long nr_entries;
	unsigned int power_size, power_seg_sz;

	nr_entries = pblk_rb_calculate_size(pblk->pgs_in_buffer);

	entries = vzalloc(nr_entries * sizeof(struct pblk_rb_entry));
	if (!entries)
		return -ENOMEM;

	power_size = get_count_order(nr_entries);
	power_seg_sz = get_count_order(geo->sec_size);

	return pblk_rb_init(&pblk->rwb, entries, power_size, power_seg_sz);
}

/* Minimum pages needed within a lun */
#define PAGE_POOL_SIZE 16
#define ADDR_POOL_SIZE 64

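/*
 * Build the internal ppa (physical page address) format: bit offsets and
 * masks for the sector, plane, channel, LUN, page and block fields, packed
 * in that order starting at bit 0.
 */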
static int pblk_set_ppaf(struct pblk *pblk)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	struct nvm_addr_format ppaf = geo->ppaf;
	int power_len;

	/* Re-calculate channel and lun format to adapt to configuration */
	power_len = get_count_order(geo->nr_chnls);
	if (1 << power_len != geo->nr_chnls) {
		pr_err("pblk: supports only power-of-two channel config.\n");
		return -EINVAL;
	}
	ppaf.ch_len = power_len;

	power_len = get_count_order(geo->luns_per_chnl);
	if (1 << power_len != geo->luns_per_chnl) {
		pr_err("pblk: supports only power-of-two LUN config.\n");
		return -EINVAL;
	}
	ppaf.lun_len = power_len;

	pblk->ppaf.sec_offset = 0;
	pblk->ppaf.pln_offset = ppaf.sect_len;
	pblk->ppaf.ch_offset = pblk->ppaf.pln_offset + ppaf.pln_len;
	pblk->ppaf.lun_offset = pblk->ppaf.ch_offset + ppaf.ch_len;
	pblk->ppaf.pg_offset = pblk->ppaf.lun_offset + ppaf.lun_len;
	pblk->ppaf.blk_offset = pblk->ppaf.pg_offset + ppaf.pg_len;
	pblk->ppaf.sec_mask = (1ULL << ppaf.sect_len) - 1;
	pblk->ppaf.pln_mask = ((1ULL << ppaf.pln_len) - 1) <<
							pblk->ppaf.pln_offset;
	pblk->ppaf.ch_mask = ((1ULL << ppaf.ch_len) - 1) <<
							pblk->ppaf.ch_offset;
	pblk->ppaf.lun_mask = ((1ULL << ppaf.lun_len) - 1) <<
							pblk->ppaf.lun_offset;
	pblk->ppaf.pg_mask = ((1ULL << ppaf.pg_len) - 1) <<
							pblk->ppaf.pg_offset;
	pblk->ppaf.blk_mask = ((1ULL << ppaf.blk_len) - 1) <<
							pblk->ppaf.blk_offset;

	pblk->ppaf_bitsize = pblk->ppaf.blk_offset + ppaf.blk_len;

	return 0;
}

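/*
 * All caches except pblk_line_meta_cache (named after the instance's disk)
 * use fixed names; creation runs under pblk_lock.
 */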
static int pblk_init_global_caches(struct pblk *pblk)
{
	char cache_name[PBLK_CACHE_NAME_LEN];

	down_write(&pblk_lock);
	pblk_blk_ws_cache = kmem_cache_create("pblk_blk_ws",
				sizeof(struct pblk_line_ws), 0, 0, NULL);
	if (!pblk_blk_ws_cache) {
		up_write(&pblk_lock);
		return -ENOMEM;
	}

	pblk_rec_cache = kmem_cache_create("pblk_rec",
				sizeof(struct pblk_rec_ctx), 0, 0, NULL);
	if (!pblk_rec_cache) {
		kmem_cache_destroy(pblk_blk_ws_cache);
		up_write(&pblk_lock);
		return -ENOMEM;
	}

	pblk_r_rq_cache = kmem_cache_create("pblk_r_rq", pblk_r_rq_size,
				0, 0, NULL);
	if (!pblk_r_rq_cache) {
		kmem_cache_destroy(pblk_blk_ws_cache);
		kmem_cache_destroy(pblk_rec_cache);
		up_write(&pblk_lock);
		return -ENOMEM;
	}

	pblk_w_rq_cache = kmem_cache_create("pblk_w_rq", pblk_w_rq_size,
				0, 0, NULL);
	if (!pblk_w_rq_cache) {
		kmem_cache_destroy(pblk_blk_ws_cache);
		kmem_cache_destroy(pblk_rec_cache);
		kmem_cache_destroy(pblk_r_rq_cache);
		up_write(&pblk_lock);
		return -ENOMEM;
	}

	snprintf(cache_name, sizeof(cache_name), "pblk_line_m_%s",
							pblk->disk->disk_name);
	pblk_line_meta_cache = kmem_cache_create(cache_name,
				pblk->lm.sec_bitmap_len, 0, 0, NULL);
	if (!pblk_line_meta_cache) {
		kmem_cache_destroy(pblk_blk_ws_cache);
		kmem_cache_destroy(pblk_rec_cache);
		kmem_cache_destroy(pblk_r_rq_cache);
		kmem_cache_destroy(pblk_w_rq_cache);
		up_write(&pblk_lock);
		return -ENOMEM;
	}
	up_write(&pblk_lock);

	return 0;
}

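/*
 * min_write_pgs is one plane-set worth of host pages; max_write_pgs is
 * additionally capped by what the device accepts in a single command
 * (nvm_max_phys_sects()).
 */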
static int pblk_core_init(struct pblk *pblk)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	int max_write_ppas;
	int mod;

	pblk->min_write_pgs = geo->sec_per_pl * (geo->sec_size / PAGE_SIZE);
	max_write_ppas = pblk->min_write_pgs * geo->nr_luns;
	pblk->max_write_pgs = (max_write_ppas < nvm_max_phys_sects(dev)) ?
				max_write_ppas : nvm_max_phys_sects(dev);
	pblk->pgs_in_buffer = NVM_MEM_PAGE_WRITE * geo->sec_per_pg *
						geo->nr_planes * geo->nr_luns;

	if (pblk->max_write_pgs > PBLK_MAX_REQ_ADDRS) {
		pr_err("pblk: cannot support device max_phys_sect\n");
		return -EINVAL;
	}

	div_u64_rem(geo->sec_per_blk, pblk->min_write_pgs, &mod);
	if (mod) {
		pr_err("pblk: bad configuration of sectors/pages\n");
		return -EINVAL;
	}

	if (pblk_init_global_caches(pblk))
		return -ENOMEM;

	pblk->page_pool = mempool_create_page_pool(PAGE_POOL_SIZE, 0);
	if (!pblk->page_pool)
		goto free_global_caches;

	pblk->line_ws_pool = mempool_create_slab_pool(geo->nr_luns,
							pblk_blk_ws_cache);
	if (!pblk->line_ws_pool)
		goto free_page_pool;

	pblk->rec_pool = mempool_create_slab_pool(geo->nr_luns, pblk_rec_cache);
	if (!pblk->rec_pool)
		goto free_blk_ws_pool;

	pblk->r_rq_pool = mempool_create_slab_pool(64, pblk_r_rq_cache);
	if (!pblk->r_rq_pool)
		goto free_rec_pool;

	pblk->w_rq_pool = mempool_create_slab_pool(64, pblk_w_rq_cache);
	if (!pblk->w_rq_pool)
		goto free_r_rq_pool;

	pblk->line_meta_pool =
			mempool_create_slab_pool(16, pblk_line_meta_cache);
	if (!pblk->line_meta_pool)
		goto free_w_rq_pool;

	pblk->kw_wq = alloc_workqueue("pblk-aux-wq",
					WQ_MEM_RECLAIM | WQ_UNBOUND, 1);
	if (!pblk->kw_wq)
		goto free_line_meta_pool;

	if (pblk_set_ppaf(pblk))
		goto free_kw_wq;

	if (pblk_rwb_init(pblk))
		goto free_kw_wq;

	INIT_LIST_HEAD(&pblk->compl_list);
	return 0;

free_kw_wq:
	destroy_workqueue(pblk->kw_wq);
free_line_meta_pool:
	mempool_destroy(pblk->line_meta_pool);
free_w_rq_pool:
	mempool_destroy(pblk->w_rq_pool);
free_r_rq_pool:
	mempool_destroy(pblk->r_rq_pool);
free_rec_pool:
	mempool_destroy(pblk->rec_pool);
free_blk_ws_pool:
	mempool_destroy(pblk->line_ws_pool);
free_page_pool:
	mempool_destroy(pblk->page_pool);
free_global_caches:
	/* don't leak the global caches if core init fails half-way */
	kmem_cache_destroy(pblk_blk_ws_cache);
	kmem_cache_destroy(pblk_rec_cache);
	kmem_cache_destroy(pblk_r_rq_cache);
	kmem_cache_destroy(pblk_w_rq_cache);
	kmem_cache_destroy(pblk_line_meta_cache);
	return -ENOMEM;
}

static void pblk_core_free(struct pblk *pblk)
{
	if (pblk->kw_wq)
		destroy_workqueue(pblk->kw_wq);

	mempool_destroy(pblk->page_pool);
	mempool_destroy(pblk->line_ws_pool);
	mempool_destroy(pblk->rec_pool);
	mempool_destroy(pblk->r_rq_pool);
	mempool_destroy(pblk->w_rq_pool);
	mempool_destroy(pblk->line_meta_pool);

	kmem_cache_destroy(pblk_blk_ws_cache);
	kmem_cache_destroy(pblk_rec_cache);
	kmem_cache_destroy(pblk_r_rq_cache);
	kmem_cache_destroy(pblk_w_rq_cache);
	kmem_cache_destroy(pblk_line_meta_cache);
}

static void pblk_luns_free(struct pblk *pblk)
{
	kfree(pblk->luns);
}

static void pblk_lines_free(struct pblk *pblk)
{
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	struct pblk_line *line;
	int i;

	spin_lock(&l_mg->free_lock);
	for (i = 0; i < l_mg->nr_lines; i++) {
		line = &pblk->lines[i];

		pblk_line_free(pblk, line);
		kfree(line->blk_bitmap);
		kfree(line->erase_bitmap);
	}
	spin_unlock(&l_mg->free_lock);
}

static void pblk_line_meta_free(struct pblk *pblk)
{
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	int i;

	kfree(l_mg->bb_template);
	kfree(l_mg->bb_aux);

	for (i = 0; i < PBLK_DATA_LINES; i++) {
		pblk_mfree(l_mg->sline_meta[i].meta, l_mg->smeta_alloc_type);
		pblk_mfree(l_mg->eline_meta[i].meta, l_mg->emeta_alloc_type);
	}

	kfree(pblk->lines);
}

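/*
 * Read a LUN's bad block table from the device and fold it from plane to
 * block granularity (one byte per block).
 */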
static int pblk_bb_discovery(struct nvm_tgt_dev *dev, struct pblk_lun *rlun)
{
	struct nvm_geo *geo = &dev->geo;
	struct ppa_addr ppa;
	u8 *blks;
	int nr_blks, ret;

	nr_blks = geo->blks_per_lun * geo->plane_mode;
	blks = kmalloc(nr_blks, GFP_KERNEL);
	if (!blks)
		return -ENOMEM;

	ppa.ppa = 0;
	ppa.g.ch = rlun->bppa.g.ch;
	ppa.g.lun = rlun->bppa.g.lun;

	ret = nvm_get_tgt_bb_tbl(dev, ppa, blks);
	if (ret)
		goto out;

	nr_blks = nvm_bb_tbl_fold(dev->parent, blks, nr_blks);
	if (nr_blks < 0) {
		ret = nr_blks;
		goto out;
	}

	rlun->bb_list = blks;

	return 0;
out:
	kfree(blks);
	return ret;
}

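/*
 * Mark a line's bad blocks in its block bitmap. Returns the number of bad
 * blocks in the line, or -ENOMEM on allocation failure.
 */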
static int pblk_bb_line(struct pblk *pblk, struct pblk_line *line)
{
	struct pblk_line_meta *lm = &pblk->lm;
	struct pblk_lun *rlun;
	int bb_cnt = 0;
	int i;

	line->blk_bitmap = kzalloc(lm->blk_bitmap_len, GFP_KERNEL);
	if (!line->blk_bitmap)
		return -ENOMEM;

	line->erase_bitmap = kzalloc(lm->blk_bitmap_len, GFP_KERNEL);
	if (!line->erase_bitmap) {
		kfree(line->blk_bitmap);
		return -ENOMEM;
	}

	for (i = 0; i < lm->blk_per_line; i++) {
		rlun = &pblk->luns[i];
		if (rlun->bb_list[line->id] == NVM_BLK_T_FREE)
			continue;

		set_bit(i, line->blk_bitmap);
		bb_cnt++;
	}

	return bb_cnt;
}

static int pblk_luns_init(struct pblk *pblk, struct ppa_addr *luns)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	struct pblk_lun *rlun;
	int i, ret;

	/* TODO: Implement unbalanced LUN support */
	if (geo->luns_per_chnl < 0) {
		pr_err("pblk: unbalanced LUN config.\n");
		return -EINVAL;
	}

	pblk->luns = kcalloc(geo->nr_luns, sizeof(struct pblk_lun), GFP_KERNEL);
	if (!pblk->luns)
		return -ENOMEM;

	for (i = 0; i < geo->nr_luns; i++) {
		/* Stripe across channels */
		int ch = i % geo->nr_chnls;
		int lun_raw = i / geo->nr_chnls;
		int lunid = lun_raw + ch * geo->luns_per_chnl;
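		/* luns[] is laid out channel-major, so e.g. with 2 channels
		 * of 4 LUNs (lunids 0-3 on ch 0, 4-7 on ch 1) pblk->luns[]
		 * round-robins the channels: 0, 4, 1, 5, 2, 6, 3, 7
		 */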

		rlun = &pblk->luns[i];
		rlun->bppa = luns[lunid];

		sema_init(&rlun->wr_sem, 1);

		ret = pblk_bb_discovery(dev, rlun);
		if (ret) {
			while (--i >= 0)
				kfree(pblk->luns[i].bb_list);
			return ret;
		}
	}

	return 0;
}

static int pblk_lines_configure(struct pblk *pblk, int flags)
{
	struct pblk_line *line = NULL;
	int ret = 0;

	if (!(flags & NVM_TARGET_FACTORY)) {
		line = pblk_recov_l2p(pblk);
		if (IS_ERR(line)) {
			pr_err("pblk: could not recover l2p table\n");
			ret = -EFAULT;
		}
	}

	if (!line) {
		/* Configure next line for user data */
		line = pblk_line_get_first_data(pblk);
		if (!line) {
			pr_err("pblk: line list corrupted\n");
			ret = -EFAULT;
		}
	}

	return ret;
}

/* See comment over struct line_emeta definition */
static unsigned int calc_emeta_len(struct pblk *pblk, struct pblk_line_meta *lm)
{
	return (sizeof(struct line_emeta) +
			((lm->sec_per_line - lm->emeta_sec) * sizeof(u64)) +
			(pblk->l_mg.nr_lines * sizeof(u32)) +
			lm->blk_bitmap_len);
}

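/*
 * pblk reserves 20% of the free blocks as over-provisioning: with N free
 * blocks, the exposed capacity is N * 80 / 100 blocks worth of sectors,
 * while the rate limiter still accounts for all N.
 */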
static void pblk_set_provision(struct pblk *pblk, long nr_free_blks)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	sector_t provisioned;

	pblk->over_pct = 20;

	provisioned = nr_free_blks;
	provisioned *= (100 - pblk->over_pct);
	sector_div(provisioned, 100);

	/* Internally pblk manages all free blocks, but all calculations based
	 * on user capacity consider only provisioned blocks
	 */
	pblk->rl.total_blocks = nr_free_blks;
	pblk->rl.nr_secs = nr_free_blks * geo->sec_per_blk;
	pblk->capacity = provisioned * geo->sec_per_blk;
	atomic_set(&pblk->rl.free_blocks, nr_free_blks);
}

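/*
 * A line spans one block per LUN. smeta/emeta sizes are grown in whole
 * plane-sets (sec_per_pl) until their payloads fit.
 */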
static int pblk_lines_init(struct pblk *pblk)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	struct pblk_line_meta *lm = &pblk->lm;
	struct pblk_line *line;
	unsigned int smeta_len, emeta_len;
	long nr_bad_blks, nr_meta_blks, nr_free_blks;
	int bb_distance;
	int i;
	int ret;

	lm->sec_per_line = geo->sec_per_blk * geo->nr_luns;
	lm->blk_per_line = geo->nr_luns;
	lm->blk_bitmap_len = BITS_TO_LONGS(geo->nr_luns) * sizeof(long);
	lm->sec_bitmap_len = BITS_TO_LONGS(lm->sec_per_line) * sizeof(long);
	lm->lun_bitmap_len = BITS_TO_LONGS(geo->nr_luns) * sizeof(long);
	lm->high_thrs = lm->sec_per_line / 2;
	lm->mid_thrs = lm->sec_per_line / 4;

	/* Calculate necessary pages for smeta. See comment over struct
	 * line_smeta definition
	 */
	lm->smeta_len = sizeof(struct line_smeta) +
				PBLK_LINE_NR_LUN_BITMAP * lm->lun_bitmap_len;

	i = 1;
add_smeta_page:
	lm->smeta_sec = i * geo->sec_per_pl;
	lm->smeta_len = lm->smeta_sec * geo->sec_size;

	smeta_len = sizeof(struct line_smeta) +
				PBLK_LINE_NR_LUN_BITMAP * lm->lun_bitmap_len;
	if (smeta_len > lm->smeta_len) {
		i++;
		goto add_smeta_page;
	}

	/* Calculate necessary pages for emeta. See comment over struct
	 * line_emeta definition
	 */
	i = 1;
add_emeta_page:
	lm->emeta_sec = i * geo->sec_per_pl;
	lm->emeta_len = lm->emeta_sec * geo->sec_size;

	emeta_len = calc_emeta_len(pblk, lm);
	if (emeta_len > lm->emeta_len) {
		i++;
		goto add_emeta_page;
	}
	lm->emeta_bb = geo->nr_luns - i;

	nr_meta_blks = (lm->smeta_sec + lm->emeta_sec +
				(geo->sec_per_blk / 2)) / geo->sec_per_blk;
	lm->min_blk_line = nr_meta_blks + 1;

	l_mg->nr_lines = geo->blks_per_lun;
	l_mg->log_line = l_mg->data_line = NULL;
	l_mg->l_seq_nr = l_mg->d_seq_nr = 0;
	l_mg->nr_free_lines = 0;
	bitmap_zero(&l_mg->meta_bitmap, PBLK_DATA_LINES);

	/* smeta is always small enough to fit on a kmalloc memory allocation,
	 * emeta depends on the number of LUNs allocated to the pblk instance
	 */
	l_mg->smeta_alloc_type = PBLK_KMALLOC_META;
	for (i = 0; i < PBLK_DATA_LINES; i++) {
		l_mg->sline_meta[i].meta = kmalloc(lm->smeta_len, GFP_KERNEL);
		if (!l_mg->sline_meta[i].meta) {
			ret = -ENOMEM;
			goto fail_free_meta;
		}
	}

	if (lm->emeta_len > KMALLOC_MAX_CACHE_SIZE) {
		l_mg->emeta_alloc_type = PBLK_VMALLOC_META;

		for (i = 0; i < PBLK_DATA_LINES; i++) {
			l_mg->eline_meta[i].meta = vmalloc(lm->emeta_len);
			if (!l_mg->eline_meta[i].meta) {
				ret = -ENOMEM;
				goto fail_free_meta;
			}
		}
	} else {
		l_mg->emeta_alloc_type = PBLK_KMALLOC_META;

		for (i = 0; i < PBLK_DATA_LINES; i++) {
			l_mg->eline_meta[i].meta =
					kmalloc(lm->emeta_len, GFP_KERNEL);
			if (!l_mg->eline_meta[i].meta) {
				ret = -ENOMEM;
				goto fail_free_meta;
			}
		}
	}

	l_mg->bb_template = kzalloc(lm->sec_bitmap_len, GFP_KERNEL);
	if (!l_mg->bb_template) {
		ret = -ENOMEM;
		goto fail_free_meta;
	}

	l_mg->bb_aux = kzalloc(lm->sec_bitmap_len, GFP_KERNEL);
	if (!l_mg->bb_aux) {
		ret = -ENOMEM;
		goto fail_free_bb_template;
	}

	bb_distance = (geo->nr_luns) * geo->sec_per_pl;
	for (i = 0; i < lm->sec_per_line; i += bb_distance)
		bitmap_set(l_mg->bb_template, i, geo->sec_per_pl);

	INIT_LIST_HEAD(&l_mg->free_list);
	INIT_LIST_HEAD(&l_mg->corrupt_list);
	INIT_LIST_HEAD(&l_mg->bad_list);
	INIT_LIST_HEAD(&l_mg->gc_full_list);
	INIT_LIST_HEAD(&l_mg->gc_high_list);
	INIT_LIST_HEAD(&l_mg->gc_mid_list);
	INIT_LIST_HEAD(&l_mg->gc_low_list);
	INIT_LIST_HEAD(&l_mg->gc_empty_list);

	l_mg->gc_lists[0] = &l_mg->gc_high_list;
	l_mg->gc_lists[1] = &l_mg->gc_mid_list;
	l_mg->gc_lists[2] = &l_mg->gc_low_list;

	spin_lock_init(&l_mg->free_lock);
	spin_lock_init(&l_mg->gc_lock);

	pblk->lines = kcalloc(l_mg->nr_lines, sizeof(struct pblk_line),
								GFP_KERNEL);
	if (!pblk->lines) {
		ret = -ENOMEM;
		goto fail_free_bb_aux;
	}

	nr_free_blks = 0;
	for (i = 0; i < l_mg->nr_lines; i++) {
		int blk_in_line;

		line = &pblk->lines[i];

		line->pblk = pblk;
		line->id = i;
		line->type = PBLK_LINETYPE_FREE;
		line->state = PBLK_LINESTATE_FREE;
		line->gc_group = PBLK_LINEGC_NONE;
		spin_lock_init(&line->lock);

		nr_bad_blks = pblk_bb_line(pblk, line);
		if (nr_bad_blks < 0 || nr_bad_blks > lm->blk_per_line) {
			ret = -EINVAL;
			goto fail_free_lines;
		}

		blk_in_line = lm->blk_per_line - nr_bad_blks;
		if (blk_in_line < lm->min_blk_line) {
			line->state = PBLK_LINESTATE_BAD;
			list_add_tail(&line->list, &l_mg->bad_list);
			continue;
		}

		nr_free_blks += blk_in_line;
		atomic_set(&line->blk_in_line, blk_in_line);

		l_mg->nr_free_lines++;
		list_add_tail(&line->list, &l_mg->free_list);
	}

	pblk_set_provision(pblk, nr_free_blks);

	sema_init(&pblk->erase_sem, 1);

	/* Cleanup per-LUN bad block lists - managed within lines on run-time */
	for (i = 0; i < geo->nr_luns; i++)
		kfree(pblk->luns[i].bb_list);

	return 0;
fail_free_lines:
	kfree(pblk->lines);
fail_free_bb_aux:
	kfree(l_mg->bb_aux);
fail_free_bb_template:
	kfree(l_mg->bb_template);
fail_free_meta:
	for (i = 0; i < PBLK_DATA_LINES; i++) {
		/* pblk is kzalloc'd, so meta slots that were never allocated
		 * are NULL; kfree()/vfree() accept NULL
		 */
		pblk_mfree(l_mg->sline_meta[i].meta, l_mg->smeta_alloc_type);
		pblk_mfree(l_mg->eline_meta[i].meta, l_mg->emeta_alloc_type);
	}

	for (i = 0; i < geo->nr_luns; i++)
		kfree(pblk->luns[i].bb_list);

	return ret;
}

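/*
 * wtimer fires every 100ms (pblk_write_timer_fn) to kick the writer thread
 * so the write buffer keeps draining even when user I/O stalls.
 */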
static int pblk_writer_init(struct pblk *pblk)
{
	setup_timer(&pblk->wtimer, pblk_write_timer_fn, (unsigned long)pblk);
	mod_timer(&pblk->wtimer, jiffies + msecs_to_jiffies(100));

	pblk->writer_ts = kthread_create(pblk_write_ts, pblk, "pblk-writer-t");
	if (IS_ERR(pblk->writer_ts)) {
		pr_err("pblk: could not allocate writer kthread\n");
		return PTR_ERR(pblk->writer_ts);
	}

	return 0;
}

static void pblk_writer_stop(struct pblk *pblk)
{
	if (pblk->writer_ts)
		kthread_stop(pblk->writer_ts);
	del_timer(&pblk->wtimer);
}

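/*
 * Tear-down order: flush and stop the writer, sync the L2P table with the
 * write buffer, pad the open line, then release the buffer and the rate
 * limiter.
 */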
static void pblk_free(struct pblk *pblk)
{
	pblk_luns_free(pblk);
	pblk_lines_free(pblk);
	pblk_line_meta_free(pblk);
	pblk_core_free(pblk);
	pblk_l2p_free(pblk);

	kfree(pblk);
}

static void pblk_tear_down(struct pblk *pblk)
{
	pblk_flush_writer(pblk);
	pblk_writer_stop(pblk);
	pblk_rb_sync_l2p(&pblk->rwb);
	pblk_recov_pad(pblk);
	pblk_rwb_free(pblk);
	pblk_rl_free(&pblk->rl);

	pr_debug("pblk: consistent tear down\n");
}

static void pblk_exit(void *private)
{
	struct pblk *pblk = private;

	down_write(&pblk_lock);
	pblk_gc_exit(pblk);
	pblk_tear_down(pblk);
	pblk_free(pblk);
	up_write(&pblk_lock);
}

static sector_t pblk_capacity(void *private)
{
	struct pblk *pblk = private;

	return pblk->capacity * NR_PHY_IN_LOG;
}

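/*
 * Instance bring-up order: LUNs -> lines -> core (pools, write buffer) ->
 * L2P table -> line configuration/recovery -> writer thread -> GC.
 */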
static void *pblk_init(struct nvm_tgt_dev *dev, struct gendisk *tdisk,
		       int flags)
{
	struct nvm_geo *geo = &dev->geo;
	struct request_queue *bqueue = dev->q;
	struct request_queue *tqueue = tdisk->queue;
	struct pblk *pblk;
	int ret;

	if (dev->identity.dom & NVM_RSP_L2P) {
		pr_err("pblk: device-side L2P table not supported. (%x)\n",
							dev->identity.dom);
		return ERR_PTR(-EINVAL);
	}

	pblk = kzalloc(sizeof(struct pblk), GFP_KERNEL);
	if (!pblk)
		return ERR_PTR(-ENOMEM);

	pblk->dev = dev;
	pblk->disk = tdisk;

	spin_lock_init(&pblk->trans_lock);
	spin_lock_init(&pblk->lock);

	if (flags & NVM_TARGET_FACTORY)
		pblk_setup_uuid(pblk);

#ifdef CONFIG_NVM_DEBUG
	atomic_long_set(&pblk->inflight_writes, 0);
	atomic_long_set(&pblk->padded_writes, 0);
	atomic_long_set(&pblk->padded_wb, 0);
	atomic_long_set(&pblk->nr_flush, 0);
	atomic_long_set(&pblk->req_writes, 0);
	atomic_long_set(&pblk->sub_writes, 0);
	atomic_long_set(&pblk->sync_writes, 0);
	atomic_long_set(&pblk->compl_writes, 0);
	atomic_long_set(&pblk->inflight_reads, 0);
	atomic_long_set(&pblk->cache_reads, 0);
	atomic_long_set(&pblk->sync_reads, 0);
	atomic_long_set(&pblk->recov_writes, 0);
	atomic_long_set(&pblk->recov_gc_writes, 0);
#endif

	atomic_long_set(&pblk->read_failed, 0);
	atomic_long_set(&pblk->read_empty, 0);
	atomic_long_set(&pblk->read_high_ecc, 0);
	atomic_long_set(&pblk->read_failed_gc, 0);
	atomic_long_set(&pblk->write_failed, 0);
	atomic_long_set(&pblk->erase_failed, 0);

	ret = pblk_luns_init(pblk, dev->luns);
	if (ret) {
		pr_err("pblk: could not initialize luns\n");
		goto fail;
	}

	ret = pblk_lines_init(pblk);
	if (ret) {
		pr_err("pblk: could not initialize lines\n");
		goto fail_free_luns;
	}

	ret = pblk_core_init(pblk);
	if (ret) {
		pr_err("pblk: could not initialize core\n");
		goto fail_free_line_meta;
	}

	ret = pblk_l2p_init(pblk);
	if (ret) {
		pr_err("pblk: could not initialize maps\n");
		goto fail_free_core;
	}

	ret = pblk_lines_configure(pblk, flags);
	if (ret) {
		pr_err("pblk: could not configure lines\n");
		goto fail_free_l2p;
	}

	ret = pblk_writer_init(pblk);
	if (ret) {
		pr_err("pblk: could not initialize write thread\n");
		goto fail_free_lines;
	}

	ret = pblk_gc_init(pblk);
	if (ret) {
		pr_err("pblk: could not initialize gc\n");
		goto fail_stop_writer;
	}

	/* inherit the size from the underlying device */
	blk_queue_logical_block_size(tqueue, queue_physical_block_size(bqueue));
	blk_queue_max_hw_sectors(tqueue, queue_max_hw_sectors(bqueue));

	blk_queue_write_cache(tqueue, true, false);

	tqueue->limits.discard_granularity = geo->pgs_per_blk * geo->pfpg_size;
	tqueue->limits.discard_alignment = 0;
	blk_queue_max_discard_sectors(tqueue, UINT_MAX >> 9);
	queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, tqueue);

	pr_info("pblk init: luns:%u, lines:%d, secs:%llu, buf entries:%u\n",
			geo->nr_luns, pblk->l_mg.nr_lines,
			(unsigned long long)pblk->rl.nr_secs,
			pblk->rwb.nr_entries);

	wake_up_process(pblk->writer_ts);
	return pblk;

fail_stop_writer:
	pblk_writer_stop(pblk);
fail_free_lines:
	pblk_lines_free(pblk);
fail_free_l2p:
	pblk_l2p_free(pblk);
fail_free_core:
	pblk_core_free(pblk);
fail_free_line_meta:
	pblk_line_meta_free(pblk);
fail_free_luns:
	pblk_luns_free(pblk);
fail:
	kfree(pblk);
	return ERR_PTR(ret);
}

/* physical block device target */
static struct nvm_tgt_type tt_pblk = {
	.name		= "pblk",
	.version	= {1, 0, 0},

	.make_rq	= pblk_make_rq,
	.capacity	= pblk_capacity,

	.init		= pblk_init,
	.exit		= pblk_exit,

	.sysfs_init	= pblk_sysfs_init,
	.sysfs_exit	= pblk_sysfs_exit,
};

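/*
 * pblk_bio_set backs pblk's internal bio allocations (e.g. cloning in the
 * read path), keeping them separate from the global bio pools.
 */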
static int __init pblk_module_init(void)
{
	int ret;

	pblk_bio_set = bioset_create(BIO_POOL_SIZE, 0, 0);
	if (!pblk_bio_set)
		return -ENOMEM;
	ret = nvm_register_tgt_type(&tt_pblk);
	if (ret)
		bioset_free(pblk_bio_set);
	return ret;
}

static void pblk_module_exit(void)
{
	bioset_free(pblk_bio_set);
	nvm_unregister_tgt_type(&tt_pblk);
}

module_init(pblk_module_init);
module_exit(pblk_module_exit);
MODULE_AUTHOR("Javier Gonzalez <javier@cnexlabs.com>");
MODULE_AUTHOR("Matias Bjorling <matias@cnexlabs.com>");
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Physical Block-Device for Open-Channel SSDs");