/*
 * Copyright (C) 2015 IT University of Copenhagen (rrpc.c)
 * Copyright (C) 2016 CNEX Labs
 * Initial release: Javier Gonzalez <javier@cnexlabs.com>
 *                  Matias Bjorling <matias@cnexlabs.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * Implementation of a physical block-device target for Open-channel SSDs.
 *
 * pblk-init.c - pblk's initialization.
 */

#include "pblk.h"
static struct kmem_cache *pblk_ws_cache, *pblk_rec_cache, *pblk_g_rq_cache,
                                *pblk_w_rq_cache;
static DECLARE_RWSEM(pblk_lock);
struct bio_set *pblk_bio_set;
static int pblk_rw_io(struct request_queue *q, struct pblk *pblk,
                          struct bio *bio)
{
        int ret;

        /* Read requests must be <= 256KB due to NVMe's 64 bit completion bitmap
         * constraint. Writes can be of arbitrary size.
         */
        if (bio_data_dir(bio) == READ) {
                blk_queue_split(q, &bio);
                ret = pblk_submit_read(pblk, bio);
                if (ret == NVM_IO_DONE && bio_flagged(bio, BIO_CLONED))
                        bio_put(bio);

                return ret;
        }

        /* Prevent deadlock in the case of a modest LUN configuration and large
         * user I/Os. Unless stalled, the rate limiter leaves at least 256KB
         * available for user I/O.
         */
        if (pblk_get_secs(bio) > pblk_rl_max_io(&pblk->rl))
                blk_queue_split(q, &bio);

        return pblk_write_to_cache(pblk, bio, PBLK_IOTYPE_USER);
}
static blk_qc_t pblk_make_rq(struct request_queue *q, struct bio *bio)
{
        struct pblk *pblk = q->queuedata;

        if (bio_op(bio) == REQ_OP_DISCARD) {
                pblk_discard(pblk, bio);
                if (!(bio->bi_opf & REQ_PREFLUSH)) {
                        bio_endio(bio);
                        return BLK_QC_T_NONE;
                }
        }

        switch (pblk_rw_io(q, pblk, bio)) {
        case NVM_IO_ERR:
                bio_io_error(bio);
                break;
        case NVM_IO_DONE:
                bio_endio(bio);
                break;
        }

        return BLK_QC_T_NONE;
}
static void pblk_l2p_free(struct pblk *pblk)
{
        vfree(pblk->trans_map);
}
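/*
 * The L2P table keeps one entry per exposed logical sector: 4 bytes when the
 * packed PPA fits in under 32 bits, 8 bytes otherwise. Rough, illustrative
 * sizing: a 1 TB instance with 4 KB sectors holds 256M entries, i.e. 1 GB
 * (4-byte entries) or 2 GB (8-byte entries) of vmalloc'ed memory.
 */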
static int pblk_l2p_init(struct pblk *pblk)
{
        sector_t i;
        struct ppa_addr ppa;
        int entry_size = 8;

        if (pblk->ppaf_bitsize < 32)
                entry_size = 4;

        pblk->trans_map = vmalloc(entry_size * pblk->rl.nr_secs);
        if (!pblk->trans_map)
                return -ENOMEM;

        pblk_ppa_set_empty(&ppa);

        for (i = 0; i < pblk->rl.nr_secs; i++)
                pblk_trans_map_set(pblk, i, ppa);

        return 0;
}
static void pblk_rwb_free(struct pblk *pblk)
{
        if (pblk_rb_tear_down_check(&pblk->rwb))
                pr_err("pblk: write buffer error on tear down\n");

        pblk_rb_data_free(&pblk->rwb);
        vfree(pblk_rb_entries_ref(&pblk->rwb));
}
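/*
 * pblk_rb_calculate_size() derives the entry count from pgs_in_buffer, and
 * pblk_rb_init() takes both sizes as powers of two (get_count_order() is the
 * rounded-up log2), which lets the ring buffer wrap its producer/consumer
 * positions with a mask instead of a division.
 */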
static int pblk_rwb_init(struct pblk *pblk)
{
        struct nvm_tgt_dev *dev = pblk->dev;
        struct nvm_geo *geo = &dev->geo;
        struct pblk_rb_entry *entries;
        unsigned long nr_entries;
        unsigned int power_size, power_seg_sz;

        nr_entries = pblk_rb_calculate_size(pblk->pgs_in_buffer);

        entries = vzalloc(nr_entries * sizeof(struct pblk_rb_entry));
        if (!entries)
                return -ENOMEM;

        power_size = get_count_order(nr_entries);
        power_seg_sz = get_count_order(geo->sec_size);

        return pblk_rb_init(&pblk->rwb, entries, power_size, power_seg_sz);
}
/* Minimum pages needed within a lun */
#define ADDR_POOL_SIZE 64
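/*
 * pblk packs physical addresses into the low bits of a 64-bit word, field by
 * field: sec | pln | ch | lun | pg | blk, each offset stacked on the previous
 * field's length. Worked example with a hypothetical geometry of 4 sectors/
 * page, 4 planes, 16 channels, 8 LUNs/channel and 512 pages/block:
 * sec[1:0] pln[3:2] ch[7:4] lun[10:8] pg[19:11] blk[20+], giving a
 * ppaf_bitsize of 20 + blk_len bits.
 */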
static int pblk_set_ppaf(struct pblk *pblk)
{
        struct nvm_tgt_dev *dev = pblk->dev;
        struct nvm_geo *geo = &dev->geo;
        struct nvm_addr_format ppaf = geo->ppaf;
        int power_len;

        /* Re-calculate channel and lun format to adapt to configuration */
        power_len = get_count_order(geo->nr_chnls);
        if (1 << power_len != geo->nr_chnls) {
                pr_err("pblk: supports only power-of-two channel config.\n");
                return -EINVAL;
        }
        ppaf.ch_len = power_len;

        power_len = get_count_order(geo->luns_per_chnl);
        if (1 << power_len != geo->luns_per_chnl) {
                pr_err("pblk: supports only power-of-two LUN config.\n");
                return -EINVAL;
        }
        ppaf.lun_len = power_len;

        pblk->ppaf.sec_offset = 0;
        pblk->ppaf.pln_offset = ppaf.sect_len;
        pblk->ppaf.ch_offset = pblk->ppaf.pln_offset + ppaf.pln_len;
        pblk->ppaf.lun_offset = pblk->ppaf.ch_offset + ppaf.ch_len;
        pblk->ppaf.pg_offset = pblk->ppaf.lun_offset + ppaf.lun_len;
        pblk->ppaf.blk_offset = pblk->ppaf.pg_offset + ppaf.pg_len;
        pblk->ppaf.sec_mask = (1ULL << ppaf.sect_len) - 1;
        pblk->ppaf.pln_mask = ((1ULL << ppaf.pln_len) - 1) <<
                                                        pblk->ppaf.pln_offset;
        pblk->ppaf.ch_mask = ((1ULL << ppaf.ch_len) - 1) <<
                                                        pblk->ppaf.ch_offset;
        pblk->ppaf.lun_mask = ((1ULL << ppaf.lun_len) - 1) <<
                                                        pblk->ppaf.lun_offset;
        pblk->ppaf.pg_mask = ((1ULL << ppaf.pg_len) - 1) <<
                                                        pblk->ppaf.pg_offset;
        pblk->ppaf.blk_mask = ((1ULL << ppaf.blk_len) - 1) <<
                                                        pblk->ppaf.blk_offset;

        pblk->ppaf_bitsize = pblk->ppaf.blk_offset + ppaf.blk_len;

        return 0;
}
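/*
 * The slab caches below back the per-I/O contexts and are module-global;
 * their creation runs under pblk_lock so concurrent target creation does not
 * race on the shared cache pointers.
 */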
static int pblk_init_global_caches(struct pblk *pblk)
{
        down_write(&pblk_lock);
        pblk_ws_cache = kmem_cache_create("pblk_blk_ws",
                                sizeof(struct pblk_line_ws), 0, 0, NULL);
        if (!pblk_ws_cache) {
                up_write(&pblk_lock);
                return -ENOMEM;
        }

        pblk_rec_cache = kmem_cache_create("pblk_rec",
                                sizeof(struct pblk_rec_ctx), 0, 0, NULL);
        if (!pblk_rec_cache) {
                kmem_cache_destroy(pblk_ws_cache);
                up_write(&pblk_lock);
                return -ENOMEM;
        }

        pblk_g_rq_cache = kmem_cache_create("pblk_g_rq", pblk_g_rq_size,
                                0, 0, NULL);
        if (!pblk_g_rq_cache) {
                kmem_cache_destroy(pblk_ws_cache);
                kmem_cache_destroy(pblk_rec_cache);
                up_write(&pblk_lock);
                return -ENOMEM;
        }

        pblk_w_rq_cache = kmem_cache_create("pblk_w_rq", pblk_w_rq_size,
                                0, 0, NULL);
        if (!pblk_w_rq_cache) {
                kmem_cache_destroy(pblk_ws_cache);
                kmem_cache_destroy(pblk_rec_cache);
                kmem_cache_destroy(pblk_g_rq_cache);
                up_write(&pblk_lock);
                return -ENOMEM;
        }
        up_write(&pblk_lock);

        return 0;
}
static void pblk_free_global_caches(struct pblk *pblk)
{
        kmem_cache_destroy(pblk_ws_cache);
        kmem_cache_destroy(pblk_rec_cache);
        kmem_cache_destroy(pblk_g_rq_cache);
        kmem_cache_destroy(pblk_w_rq_cache);
}
static int pblk_core_init(struct pblk *pblk)
{
        struct nvm_tgt_dev *dev = pblk->dev;
        struct nvm_geo *geo = &dev->geo;

        pblk->pgs_in_buffer = NVM_MEM_PAGE_WRITE * geo->sec_per_pg *
                                                geo->nr_planes * geo->nr_luns;

        if (pblk_init_global_caches(pblk))
                return -ENOMEM;

        /* Internal bios can be at most the sectors signaled by the device. */
        pblk->page_bio_pool = mempool_create_page_pool(nvm_max_phys_sects(dev),
                                                                        0);
        if (!pblk->page_bio_pool)
                goto free_global_caches;

        pblk->gen_ws_pool = mempool_create_slab_pool(PBLK_GEN_WS_POOL_SIZE,
                                                        pblk_ws_cache);
        if (!pblk->gen_ws_pool)
                goto free_page_bio_pool;

        pblk->rec_pool = mempool_create_slab_pool(geo->nr_luns, pblk_rec_cache);
        if (!pblk->rec_pool)
                goto free_gen_ws_pool;

        pblk->r_rq_pool = mempool_create_slab_pool(geo->nr_luns,
                                                        pblk_g_rq_cache);
        if (!pblk->r_rq_pool)
                goto free_rec_pool;

        pblk->e_rq_pool = mempool_create_slab_pool(geo->nr_luns,
                                                        pblk_g_rq_cache);
        if (!pblk->e_rq_pool)
                goto free_r_rq_pool;

        pblk->w_rq_pool = mempool_create_slab_pool(geo->nr_luns,
                                                        pblk_w_rq_cache);
        if (!pblk->w_rq_pool)
                goto free_e_rq_pool;

        pblk->close_wq = alloc_workqueue("pblk-close-wq",
                        WQ_MEM_RECLAIM | WQ_UNBOUND, PBLK_NR_CLOSE_JOBS);
        if (!pblk->close_wq)
                goto free_w_rq_pool;

        pblk->bb_wq = alloc_workqueue("pblk-bb-wq",
                        WQ_MEM_RECLAIM | WQ_UNBOUND, 0);
        if (!pblk->bb_wq)
                goto free_close_wq;

        pblk->r_end_wq = alloc_workqueue("pblk-read-end-wq",
                        WQ_MEM_RECLAIM | WQ_UNBOUND, 0);
        if (!pblk->r_end_wq)
                goto free_bb_wq;

        if (pblk_set_ppaf(pblk))
                goto free_r_end_wq;

        if (pblk_rwb_init(pblk))
                goto free_r_end_wq;

        INIT_LIST_HEAD(&pblk->compl_list);

        return 0;

free_r_end_wq:
        destroy_workqueue(pblk->r_end_wq);
free_bb_wq:
        destroy_workqueue(pblk->bb_wq);
free_close_wq:
        destroy_workqueue(pblk->close_wq);
free_w_rq_pool:
        mempool_destroy(pblk->w_rq_pool);
free_e_rq_pool:
        mempool_destroy(pblk->e_rq_pool);
free_r_rq_pool:
        mempool_destroy(pblk->r_rq_pool);
free_rec_pool:
        mempool_destroy(pblk->rec_pool);
free_gen_ws_pool:
        mempool_destroy(pblk->gen_ws_pool);
free_page_bio_pool:
        mempool_destroy(pblk->page_bio_pool);
free_global_caches:
        pblk_free_global_caches(pblk);

        return -ENOMEM;
}
static void pblk_core_free(struct pblk *pblk)
{
        if (pblk->close_wq)
                destroy_workqueue(pblk->close_wq);

        if (pblk->r_end_wq)
                destroy_workqueue(pblk->r_end_wq);

        if (pblk->bb_wq)
                destroy_workqueue(pblk->bb_wq);

        mempool_destroy(pblk->page_bio_pool);
        mempool_destroy(pblk->gen_ws_pool);
        mempool_destroy(pblk->rec_pool);
        mempool_destroy(pblk->r_rq_pool);
        mempool_destroy(pblk->e_rq_pool);
        mempool_destroy(pblk->w_rq_pool);

        pblk_free_global_caches(pblk);
}
static void pblk_luns_free(struct pblk *pblk)
{
        kfree(pblk->luns);
}
static void pblk_free_line_bitmaps(struct pblk_line *line)
{
        kfree(line->blk_bitmap);
        kfree(line->erase_bitmap);
}
static void pblk_lines_free(struct pblk *pblk)
{
        struct pblk_line_mgmt *l_mg = &pblk->l_mg;
        struct pblk_line *line;
        int i;

        spin_lock(&l_mg->free_lock);
        for (i = 0; i < l_mg->nr_lines; i++) {
                line = &pblk->lines[i];

                pblk_line_free(pblk, line);
                pblk_free_line_bitmaps(line);
        }
        spin_unlock(&l_mg->free_lock);
}
static void pblk_line_meta_free(struct pblk *pblk)
{
        struct pblk_line_mgmt *l_mg = &pblk->l_mg;
        int i;

        kfree(l_mg->bb_template);
        kfree(l_mg->bb_aux);
        kfree(l_mg->vsc_list);

        spin_lock(&l_mg->free_lock);
        for (i = 0; i < PBLK_DATA_LINES; i++) {
                kfree(l_mg->sline_meta[i]);
                pblk_mfree(l_mg->eline_meta[i]->buf, l_mg->emeta_alloc_type);
                kfree(l_mg->eline_meta[i]);
        }
        spin_unlock(&l_mg->free_lock);

        kfree(pblk->lines);
}
static int pblk_bb_discovery(struct nvm_tgt_dev *dev, struct pblk_lun *rlun)
{
        struct nvm_geo *geo = &dev->geo;
        struct ppa_addr ppa;
        u8 *blks;
        int nr_blks, ret;

        nr_blks = geo->blks_per_lun * geo->plane_mode;
        blks = kmalloc(nr_blks, GFP_KERNEL);
        if (!blks)
                return -ENOMEM;

        ppa.ppa = 0;
        ppa.g.ch = rlun->bppa.g.ch;
        ppa.g.lun = rlun->bppa.g.lun;

        ret = nvm_get_tgt_bb_tbl(dev, ppa, blks);
        if (ret)
                goto out;

        nr_blks = nvm_bb_tbl_fold(dev->parent, blks, nr_blks);
        if (nr_blks < 0) {
                ret = nr_blks;
                goto out;
        }

        rlun->bb_list = blks;

        return 0;
out:
        kfree(blks);
        return ret;
}
static int pblk_bb_line(struct pblk *pblk, struct pblk_line *line,
                        int blk_per_line)
{
        struct nvm_tgt_dev *dev = pblk->dev;
        struct nvm_geo *geo = &dev->geo;
        struct pblk_lun *rlun;
        int bb_cnt = 0;
        int i;

        for (i = 0; i < blk_per_line; i++) {
                rlun = &pblk->luns[i];
                if (rlun->bb_list[line->id] == NVM_BLK_T_FREE)
                        continue;

                set_bit(pblk_ppa_to_pos(geo, rlun->bppa), line->blk_bitmap);
                bb_cnt++;
        }

        return bb_cnt;
}
static int pblk_alloc_line_bitmaps(struct pblk *pblk, struct pblk_line *line)
{
        struct pblk_line_meta *lm = &pblk->lm;

        line->blk_bitmap = kzalloc(lm->blk_bitmap_len, GFP_KERNEL);
        if (!line->blk_bitmap)
                return -ENOMEM;

        line->erase_bitmap = kzalloc(lm->blk_bitmap_len, GFP_KERNEL);
        if (!line->erase_bitmap) {
                kfree(line->blk_bitmap);
                return -ENOMEM;
        }

        return 0;
}
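/*
 * The luns[] array handed to pblk_luns_init() is indexed channel-major (all
 * LUNs of channel 0 first, as the lunid computation implies). The init loop
 * re-orders it so consecutive pblk LUNs alternate across channels; e.g. with
 * 2 channels x 4 LUNs/channel, rluns 0..7 take luns[0], luns[4], luns[1],
 * luns[5], luns[2], luns[6], luns[3], luns[7], striping I/O over channels.
 */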
static int pblk_luns_init(struct pblk *pblk, struct ppa_addr *luns)
{
        struct nvm_tgt_dev *dev = pblk->dev;
        struct nvm_geo *geo = &dev->geo;
        struct pblk_lun *rlun;
        int i, ret;

        /* TODO: Implement unbalanced LUN support */
        if (geo->luns_per_chnl < 0) {
                pr_err("pblk: unbalanced LUN config.\n");
                return -EINVAL;
        }

        pblk->luns = kcalloc(geo->nr_luns, sizeof(struct pblk_lun), GFP_KERNEL);
        if (!pblk->luns)
                return -ENOMEM;

        for (i = 0; i < geo->nr_luns; i++) {
                /* Stripe across channels */
                int ch = i % geo->nr_chnls;
                int lun_raw = i / geo->nr_chnls;
                int lunid = lun_raw + ch * geo->luns_per_chnl;

                rlun = &pblk->luns[i];
                rlun->bppa = luns[lunid];

                sema_init(&rlun->wr_sem, 1);

                ret = pblk_bb_discovery(dev, rlun);
                if (ret) {
                        while (--i >= 0)
                                kfree(pblk->luns[i].bb_list);
                        return ret;
                }
        }

        return 0;
}
static int pblk_lines_configure(struct pblk *pblk, int flags)
{
        struct pblk_line *line = NULL;
        int ret = 0;

        if (!(flags & NVM_TARGET_FACTORY)) {
                line = pblk_recov_l2p(pblk);
                if (IS_ERR(line)) {
                        pr_err("pblk: could not recover l2p table\n");
                        ret = -EFAULT;
                }
        }

        if (!line) {
                /* Configure next line for user data */
                line = pblk_line_get_first_data(pblk);
                if (!line) {
                        pr_err("pblk: line list corrupted\n");
                        ret = -EFAULT;
                }
        }

        return ret;
}
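/*
 * emeta (end-of-line metadata) is laid out in three sector-aligned regions:
 * [1] the line_emeta header plus the bad-block bitmap, [2] the lba_list (one
 * u64 per data sector in the line), and [3] the vsc_list (one u32 valid-
 * sector count per line). calc_emeta_len() returns their sum; emeta_sec[0]
 * and emeta_len[0] hold the rounded-up on-media footprint (see
 * pblk_lines_init()).
 */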
/* See comment over struct line_emeta definition */
static unsigned int calc_emeta_len(struct pblk *pblk)
{
        struct pblk_line_meta *lm = &pblk->lm;
        struct pblk_line_mgmt *l_mg = &pblk->l_mg;
        struct nvm_tgt_dev *dev = pblk->dev;
        struct nvm_geo *geo = &dev->geo;

        /* Round to sector size so that lba_list starts on its own sector */
        lm->emeta_sec[1] = DIV_ROUND_UP(
                        sizeof(struct line_emeta) + lm->blk_bitmap_len,
                        geo->sec_size);
        lm->emeta_len[1] = lm->emeta_sec[1] * geo->sec_size;

        /* Round to sector size so that vsc_list starts on its own sector */
        lm->dsec_per_line = lm->sec_per_line - lm->emeta_sec[0];
        lm->emeta_sec[2] = DIV_ROUND_UP(lm->dsec_per_line * sizeof(u64),
                        geo->sec_size);
        lm->emeta_len[2] = lm->emeta_sec[2] * geo->sec_size;

        lm->emeta_sec[3] = DIV_ROUND_UP(l_mg->nr_lines * sizeof(u32),
                        geo->sec_size);
        lm->emeta_len[3] = lm->emeta_sec[3] * geo->sec_size;

        lm->vsc_list_len = l_mg->nr_lines * sizeof(u32);

        return (lm->emeta_len[1] + lm->emeta_len[2] + lm->emeta_len[3]);
}
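/*
 * Over-provisioning arithmetic, by example: with over_pct = 20 and 1000 free
 * blocks, provisioned = 1000 * (100 - 20) / 100 = 800 blocks, so the exposed
 * capacity is 800 * sec_per_blk sectors while the rate limiter keeps
 * accounting for all 1000 blocks internally.
 */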
static void pblk_set_provision(struct pblk *pblk, long nr_free_blks)
{
        struct nvm_tgt_dev *dev = pblk->dev;
        struct nvm_geo *geo = &dev->geo;
        sector_t provisioned;

        pblk->over_pct = 20;

        provisioned = nr_free_blks;
        provisioned *= (100 - pblk->over_pct);
        sector_div(provisioned, 100);

        /* Internally pblk manages all free blocks, but all calculations based
         * on user capacity consider only provisioned blocks
         */
        pblk->rl.total_blocks = nr_free_blks;
        pblk->rl.nr_secs = nr_free_blks * geo->sec_per_blk;
        pblk->capacity = provisioned * geo->sec_per_blk;
        atomic_set(&pblk->rl.free_blocks, nr_free_blks);
}
static int pblk_lines_alloc_metadata(struct pblk *pblk)
{
        struct pblk_line_mgmt *l_mg = &pblk->l_mg;
        struct pblk_line_meta *lm = &pblk->lm;
        int i;

        /* smeta is always small enough to fit on a kmalloc memory allocation,
         * emeta depends on the number of LUNs allocated to the pblk instance
         */
        for (i = 0; i < PBLK_DATA_LINES; i++) {
                l_mg->sline_meta[i] = kmalloc(lm->smeta_len, GFP_KERNEL);
                if (!l_mg->sline_meta[i])
                        goto fail_free_smeta;
        }

        /* emeta allocates three different buffers for managing metadata with
         * in-memory and in-media layouts
         */
        for (i = 0; i < PBLK_DATA_LINES; i++) {
                struct pblk_emeta *emeta;

                emeta = kmalloc(sizeof(struct pblk_emeta), GFP_KERNEL);
                if (!emeta)
                        goto fail_free_emeta;

                if (lm->emeta_len[0] > KMALLOC_MAX_CACHE_SIZE) {
                        l_mg->emeta_alloc_type = PBLK_VMALLOC_META;

                        emeta->buf = vmalloc(lm->emeta_len[0]);
                        if (!emeta->buf) {
                                kfree(emeta);
                                goto fail_free_emeta;
                        }

                        emeta->nr_entries = lm->emeta_sec[0];
                        l_mg->eline_meta[i] = emeta;
                } else {
                        l_mg->emeta_alloc_type = PBLK_KMALLOC_META;

                        emeta->buf = kmalloc(lm->emeta_len[0], GFP_KERNEL);
                        if (!emeta->buf) {
                                kfree(emeta);
                                goto fail_free_emeta;
                        }

                        emeta->nr_entries = lm->emeta_sec[0];
                        l_mg->eline_meta[i] = emeta;
                }
        }

        l_mg->vsc_list = kcalloc(l_mg->nr_lines, sizeof(__le32), GFP_KERNEL);
        if (!l_mg->vsc_list)
                goto fail_free_emeta;

        for (i = 0; i < l_mg->nr_lines; i++)
                l_mg->vsc_list[i] = cpu_to_le32(EMPTY_ENTRY);

        return 0;

fail_free_emeta:
        while (--i >= 0) {
                if (l_mg->emeta_alloc_type == PBLK_VMALLOC_META)
                        vfree(l_mg->eline_meta[i]->buf);
                else
                        kfree(l_mg->eline_meta[i]->buf);
                kfree(l_mg->eline_meta[i]);
        }

fail_free_smeta:
        for (i = 0; i < PBLK_DATA_LINES; i++)
                kfree(l_mg->sline_meta[i]);

        return -ENOMEM;
}
static int pblk_lines_init(struct pblk *pblk)
{
        struct nvm_tgt_dev *dev = pblk->dev;
        struct nvm_geo *geo = &dev->geo;
        struct pblk_line_mgmt *l_mg = &pblk->l_mg;
        struct pblk_line_meta *lm = &pblk->lm;
        struct pblk_line *line;
        unsigned int smeta_len, emeta_len;
        long nr_bad_blks, nr_free_blks;
        int bb_distance, max_write_ppas, mod;
        int i, ret;

        pblk->min_write_pgs = geo->sec_per_pl * (geo->sec_size / PAGE_SIZE);
        max_write_ppas = pblk->min_write_pgs * geo->nr_luns;
        pblk->max_write_pgs = (max_write_ppas < nvm_max_phys_sects(dev)) ?
                                max_write_ppas : nvm_max_phys_sects(dev);
        pblk_set_sec_per_write(pblk, pblk->min_write_pgs);
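        /*
         * min_write_pgs is one plane-page worth of sectors, scaled up when
         * the device sector size exceeds PAGE_SIZE; e.g. 4 planes x 4
         * sectors/page with 4 KB sectors gives min_write_pgs = 16.
         * max_write_pgs stripes that across every LUN, capped by what the
         * device can address in a single command.
         */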
        if (pblk->max_write_pgs > PBLK_MAX_REQ_ADDRS) {
                pr_err("pblk: cannot support device max_phys_sect\n");
                return -EINVAL;
        }

        div_u64_rem(geo->sec_per_blk, pblk->min_write_pgs, &mod);
        if (mod) {
                pr_err("pblk: bad configuration of sectors/pages\n");
                return -EINVAL;
        }
        l_mg->nr_lines = geo->blks_per_lun;
        l_mg->log_line = l_mg->data_line = NULL;
        l_mg->l_seq_nr = l_mg->d_seq_nr = 0;
        l_mg->nr_free_lines = 0;
        bitmap_zero(&l_mg->meta_bitmap, PBLK_DATA_LINES);

        lm->sec_per_line = geo->sec_per_blk * geo->nr_luns;
        lm->blk_per_line = geo->nr_luns;
        lm->blk_bitmap_len = BITS_TO_LONGS(geo->nr_luns) * sizeof(long);
        lm->sec_bitmap_len = BITS_TO_LONGS(lm->sec_per_line) * sizeof(long);
        lm->lun_bitmap_len = BITS_TO_LONGS(geo->nr_luns) * sizeof(long);
        lm->mid_thrs = lm->sec_per_line / 2;
        lm->high_thrs = lm->sec_per_line / 4;
        lm->meta_distance = (geo->nr_luns / 2) * pblk->min_write_pgs;
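        /*
         * mid_thrs/high_thrs act as valid-sector thresholds when closed lines
         * are sorted into the gc_high/mid/low lists initialized further down:
         * the fewer valid sectors a line holds, the cheaper it is to garbage
         * collect.
         */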
        /* Calculate necessary pages for smeta. See comment over struct
         * line_smeta definition
         */
        i = 1;
add_smeta_page:
        lm->smeta_sec = i * geo->sec_per_pl;
        lm->smeta_len = lm->smeta_sec * geo->sec_size;

        smeta_len = sizeof(struct line_smeta) + lm->lun_bitmap_len;
        if (smeta_len > lm->smeta_len) {
                i++;
                goto add_smeta_page;
        }
        /* Calculate necessary pages for emeta. See comment over struct
         * line_emeta definition
         */
        i = 1;
add_emeta_page:
        lm->emeta_sec[0] = i * geo->sec_per_pl;
        lm->emeta_len[0] = lm->emeta_sec[0] * geo->sec_size;

        emeta_len = calc_emeta_len(pblk);
        if (emeta_len > lm->emeta_len[0]) {
                i++;
                goto add_emeta_page;
        }

        lm->emeta_bb = geo->nr_luns > i ? geo->nr_luns - i : 0;

        lm->min_blk_line = 1;
        if (geo->nr_luns > 1)
                lm->min_blk_line += DIV_ROUND_UP(lm->smeta_sec +
                                        lm->emeta_sec[0], geo->sec_per_blk);

        if (lm->min_blk_line > lm->blk_per_line) {
                pr_err("pblk: config. not supported. Min. LUN in line:%d\n",
                                                        lm->blk_per_line);
                return -EINVAL;
        }
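        /*
         * A line needs at least min_blk_line good blocks: one for data plus
         * enough to hold its own smeta and emeta. Lines that drop below this
         * after bad-block accounting are put on the bad list further down.
         */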
        ret = pblk_lines_alloc_metadata(pblk);
        if (ret)
                goto fail;

        l_mg->bb_template = kzalloc(lm->sec_bitmap_len, GFP_KERNEL);
        if (!l_mg->bb_template) {
                ret = -ENOMEM;
                goto fail_free_meta;
        }

        l_mg->bb_aux = kzalloc(lm->sec_bitmap_len, GFP_KERNEL);
        if (!l_mg->bb_aux) {
                ret = -ENOMEM;
                goto fail_free_bb_template;
        }

        bb_distance = (geo->nr_luns) * geo->sec_per_pl;
        for (i = 0; i < lm->sec_per_line; i += bb_distance)
                bitmap_set(l_mg->bb_template, i, geo->sec_per_pl);
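        /*
         * bb_template marks, once per bb_distance stride, the sec_per_pl
         * sectors contributed by the first LUN position of the line; it is
         * kept as a template from which a bad block's sector span can be
         * derived at the block's own position.
         */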
        INIT_LIST_HEAD(&l_mg->free_list);
        INIT_LIST_HEAD(&l_mg->corrupt_list);
        INIT_LIST_HEAD(&l_mg->bad_list);
        INIT_LIST_HEAD(&l_mg->gc_full_list);
        INIT_LIST_HEAD(&l_mg->gc_high_list);
        INIT_LIST_HEAD(&l_mg->gc_mid_list);
        INIT_LIST_HEAD(&l_mg->gc_low_list);
        INIT_LIST_HEAD(&l_mg->gc_empty_list);

        INIT_LIST_HEAD(&l_mg->emeta_list);

        l_mg->gc_lists[0] = &l_mg->gc_high_list;
        l_mg->gc_lists[1] = &l_mg->gc_mid_list;
        l_mg->gc_lists[2] = &l_mg->gc_low_list;

        spin_lock_init(&l_mg->free_lock);
        spin_lock_init(&l_mg->close_lock);
        spin_lock_init(&l_mg->gc_lock);

        pblk->lines = kcalloc(l_mg->nr_lines, sizeof(struct pblk_line),
                                                                GFP_KERNEL);
        if (!pblk->lines) {
                ret = -ENOMEM;
                goto fail_free_bb_aux;
        }
        nr_free_blks = 0;
        for (i = 0; i < l_mg->nr_lines; i++) {
                int blk_in_line;

                line = &pblk->lines[i];

                line->pblk = pblk;
                line->id = i;
                line->type = PBLK_LINETYPE_FREE;
                line->state = PBLK_LINESTATE_FREE;
                line->gc_group = PBLK_LINEGC_NONE;
                line->vsc = &l_mg->vsc_list[i];
                spin_lock_init(&line->lock);

                ret = pblk_alloc_line_bitmaps(pblk, line);
                if (ret)
                        goto fail_free_lines;

                nr_bad_blks = pblk_bb_line(pblk, line, lm->blk_per_line);
                if (nr_bad_blks < 0 || nr_bad_blks > lm->blk_per_line) {
                        pblk_free_line_bitmaps(line);
                        ret = -EINVAL;
                        goto fail_free_lines;
                }

                blk_in_line = lm->blk_per_line - nr_bad_blks;
                if (blk_in_line < lm->min_blk_line) {
                        line->state = PBLK_LINESTATE_BAD;
                        list_add_tail(&line->list, &l_mg->bad_list);
                        continue;
                }

                nr_free_blks += blk_in_line;
                atomic_set(&line->blk_in_line, blk_in_line);

                l_mg->nr_free_lines++;
                list_add_tail(&line->list, &l_mg->free_list);
        }
        pblk_set_provision(pblk, nr_free_blks);

        /* Cleanup per-LUN bad block lists - managed within lines on run-time */
        for (i = 0; i < geo->nr_luns; i++)
                kfree(pblk->luns[i].bb_list);

        return 0;
fail_free_lines:
        while (--i >= 0)
                pblk_free_line_bitmaps(&pblk->lines[i]);
fail_free_bb_aux:
        kfree(l_mg->bb_aux);
fail_free_bb_template:
        kfree(l_mg->bb_template);
fail_free_meta:
        pblk_line_meta_free(pblk);
fail:
        for (i = 0; i < geo->nr_luns; i++)
                kfree(pblk->luns[i].bb_list);

        return ret;
}
static int pblk_writer_init(struct pblk *pblk)
{
        setup_timer(&pblk->wtimer, pblk_write_timer_fn, (unsigned long)pblk);
        mod_timer(&pblk->wtimer, jiffies + msecs_to_jiffies(100));

        pblk->writer_ts = kthread_create(pblk_write_ts, pblk, "pblk-writer-t");
        if (IS_ERR(pblk->writer_ts)) {
                pr_err("pblk: could not allocate writer kthread\n");
                return PTR_ERR(pblk->writer_ts);
        }

        return 0;
}
static void pblk_writer_stop(struct pblk *pblk)
{
        /* The pipeline must be stopped and the write buffer emptied before the
         * write thread is stopped
         */
        WARN(pblk_rb_read_count(&pblk->rwb),
                        "Stopping not fully persisted write buffer\n");

        WARN(pblk_rb_sync_count(&pblk->rwb),
                        "Stopping not fully synced write buffer\n");

        if (pblk->writer_ts)
                kthread_stop(pblk->writer_ts);
        del_timer(&pblk->wtimer);
}
static void pblk_free(struct pblk *pblk)
{
        pblk_luns_free(pblk);
        pblk_lines_free(pblk);
        pblk_line_meta_free(pblk);
        pblk_core_free(pblk);
        pblk_l2p_free(pblk);

        kfree(pblk);
}
static void pblk_tear_down(struct pblk *pblk)
{
        pblk_pipeline_stop(pblk);
        pblk_writer_stop(pblk);
        pblk_rb_sync_l2p(&pblk->rwb);
        pblk_rwb_free(pblk);
        pblk_rl_free(&pblk->rl);

        pr_debug("pblk: consistent tear down\n");
}
static void pblk_exit(void *private)
{
        struct pblk *pblk = private;

        down_write(&pblk_lock);
        pblk_gc_exit(pblk);
        pblk_tear_down(pblk);
        pblk_free(pblk);
        up_write(&pblk_lock);
}
static sector_t pblk_capacity(void *private)
{
        struct pblk *pblk = private;

        return pblk->capacity * NR_PHY_IN_LOG;
}
static void *pblk_init(struct nvm_tgt_dev *dev, struct gendisk *tdisk,
                       int flags)
{
        struct nvm_geo *geo = &dev->geo;
        struct request_queue *bqueue = dev->q;
        struct request_queue *tqueue = tdisk->queue;
        struct pblk *pblk;
        int ret;

        if (dev->identity.dom & NVM_RSP_L2P) {
                pr_err("pblk: host-side L2P table not supported. (%x)\n",
                                                        dev->identity.dom);
                return ERR_PTR(-EINVAL);
        }

        pblk = kzalloc(sizeof(struct pblk), GFP_KERNEL);
        if (!pblk)
                return ERR_PTR(-ENOMEM);

        pblk->dev = dev;
        pblk->disk = tdisk;
        pblk->state = PBLK_STATE_RUNNING;
        pblk->gc.gc_enabled = 0;

        spin_lock_init(&pblk->trans_lock);
        spin_lock_init(&pblk->lock);

        if (flags & NVM_TARGET_FACTORY)
                pblk_setup_uuid(pblk);
#ifdef CONFIG_NVM_DEBUG
        atomic_long_set(&pblk->inflight_writes, 0);
        atomic_long_set(&pblk->padded_writes, 0);
        atomic_long_set(&pblk->padded_wb, 0);
        atomic_long_set(&pblk->nr_flush, 0);
        atomic_long_set(&pblk->req_writes, 0);
        atomic_long_set(&pblk->sub_writes, 0);
        atomic_long_set(&pblk->sync_writes, 0);
        atomic_long_set(&pblk->inflight_reads, 0);
        atomic_long_set(&pblk->cache_reads, 0);
        atomic_long_set(&pblk->sync_reads, 0);
        atomic_long_set(&pblk->recov_writes, 0);
        atomic_long_set(&pblk->recov_gc_writes, 0);
        atomic_long_set(&pblk->recov_gc_reads, 0);
#endif

        atomic_long_set(&pblk->read_failed, 0);
        atomic_long_set(&pblk->read_empty, 0);
        atomic_long_set(&pblk->read_high_ecc, 0);
        atomic_long_set(&pblk->read_failed_gc, 0);
        atomic_long_set(&pblk->write_failed, 0);
        atomic_long_set(&pblk->erase_failed, 0);
        ret = pblk_luns_init(pblk, dev->luns);
        if (ret) {
                pr_err("pblk: could not initialize luns\n");
                goto fail;
        }

        ret = pblk_lines_init(pblk);
        if (ret) {
                pr_err("pblk: could not initialize lines\n");
                goto fail_free_luns;
        }

        ret = pblk_core_init(pblk);
        if (ret) {
                pr_err("pblk: could not initialize core\n");
                goto fail_free_line_meta;
        }

        ret = pblk_l2p_init(pblk);
        if (ret) {
                pr_err("pblk: could not initialize maps\n");
                goto fail_free_core;
        }

        ret = pblk_lines_configure(pblk, flags);
        if (ret) {
                pr_err("pblk: could not configure lines\n");
                goto fail_free_l2p;
        }

        ret = pblk_writer_init(pblk);
        if (ret) {
                pr_err("pblk: could not initialize write thread\n");
                goto fail_free_lines;
        }

        ret = pblk_gc_init(pblk);
        if (ret) {
                pr_err("pblk: could not initialize gc\n");
                goto fail_stop_writer;
        }
        /* inherit the size from the underlying device */
        blk_queue_logical_block_size(tqueue, queue_physical_block_size(bqueue));
        blk_queue_max_hw_sectors(tqueue, queue_max_hw_sectors(bqueue));

        blk_queue_write_cache(tqueue, true, false);

        tqueue->limits.discard_granularity = geo->pgs_per_blk * geo->pfpg_size;
        tqueue->limits.discard_alignment = 0;
        blk_queue_max_discard_sectors(tqueue, UINT_MAX >> 9);
        queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, tqueue);

        pr_info("pblk init: luns:%u, lines:%d, secs:%llu, buf entries:%u\n",
                        geo->nr_luns, pblk->l_mg.nr_lines,
                        (unsigned long long)pblk->rl.nr_secs,
                        pblk->rwb.nr_entries);

        wake_up_process(pblk->writer_ts);

        return pblk;
fail_stop_writer:
        pblk_writer_stop(pblk);
fail_free_lines:
        pblk_lines_free(pblk);
fail_free_l2p:
        pblk_l2p_free(pblk);
fail_free_core:
        pblk_core_free(pblk);
fail_free_line_meta:
        pblk_line_meta_free(pblk);
fail_free_luns:
        pblk_luns_free(pblk);
fail:
        kfree(pblk);
        return ERR_PTR(ret);
}
/* physical block device target */
static struct nvm_tgt_type tt_pblk = {
        .name           = "pblk",
        .version        = {1, 0, 0},

        .make_rq        = pblk_make_rq,
        .capacity       = pblk_capacity,

        .init           = pblk_init,
        .exit           = pblk_exit,

        .sysfs_init     = pblk_sysfs_init,
        .sysfs_exit     = pblk_sysfs_exit,
        .owner          = THIS_MODULE,
};
static int __init pblk_module_init(void)
{
        int ret;

        pblk_bio_set = bioset_create(BIO_POOL_SIZE, 0, 0);
        if (!pblk_bio_set)
                return -ENOMEM;

        ret = nvm_register_tgt_type(&tt_pblk);
        if (ret)
                bioset_free(pblk_bio_set);

        return ret;
}

static void pblk_module_exit(void)
{
        bioset_free(pblk_bio_set);
        nvm_unregister_tgt_type(&tt_pblk);
}
module_init(pblk_module_init);
module_exit(pblk_module_exit);
MODULE_AUTHOR("Javier Gonzalez <javier@cnexlabs.com>");
MODULE_AUTHOR("Matias Bjorling <matias@cnexlabs.com>");
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Physical Block-Device for Open-Channel SSDs");