/*
 * Copyright (C) 2015 IT University of Copenhagen
 * Initial release: Matias Bjorling <m@bjorling.me>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * Implementation of a Round-robin page-based Hybrid FTL for Open-channel SSDs.
 */

#include "rrpc.h"

static struct kmem_cache *rrpc_gcb_cache, *rrpc_rq_cache;
static DECLARE_RWSEM(rrpc_lock);

static int rrpc_submit_io(struct rrpc *rrpc, struct bio *bio,
				struct nvm_rq *rqd, unsigned long flags);

#define rrpc_for_each_lun(rrpc, rlun, i) \
		for ((i) = 0, rlun = &(rrpc)->luns[0]; \
			(i) < (rrpc)->nr_luns; (i)++, rlun = &(rrpc)->luns[(i)])

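/* Mark the physical page backing @a invalid: set the corresponding bit in the
 * block's invalid_pages bitmap, bump its invalid-page count and clear the
 * reverse (physical-to-logical) mapping. Callers must hold rrpc->rev_lock.
 */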
static void rrpc_page_invalidate(struct rrpc *rrpc, struct rrpc_addr *a)
{
	struct rrpc_block *rblk = a->rblk;
	unsigned int pg_offset;

	lockdep_assert_held(&rrpc->rev_lock);

	if (a->addr == ADDR_EMPTY || !rblk)
		return;

	spin_lock(&rblk->lock);

	div_u64_rem(a->addr, rrpc->dev->sec_per_blk, &pg_offset);
	WARN_ON(test_and_set_bit(pg_offset, rblk->invalid_pages));
	rblk->nr_invalid_pages++;

	spin_unlock(&rblk->lock);

	rrpc->rev_trans_map[a->addr - rrpc->poffset].addr = ADDR_EMPTY;
}

static void rrpc_invalidate_range(struct rrpc *rrpc, sector_t slba,
							unsigned int len)
{
	sector_t i;

	spin_lock(&rrpc->rev_lock);
	for (i = slba; i < slba + len; i++) {
		struct rrpc_addr *gp = &rrpc->trans_map[i];

		rrpc_page_invalidate(rrpc, gp);
		gp->rblk = NULL;
	}
	spin_unlock(&rrpc->rev_lock);
}

static struct nvm_rq *rrpc_inflight_laddr_acquire(struct rrpc *rrpc,
					sector_t laddr, unsigned int pages)
{
	struct nvm_rq *rqd;
	struct rrpc_inflight_rq *inf;

	rqd = mempool_alloc(rrpc->rq_pool, GFP_ATOMIC);
	if (!rqd)
		return ERR_PTR(-ENOMEM);

	inf = rrpc_get_inflight_rq(rqd);
	if (rrpc_lock_laddr(rrpc, laddr, pages, inf)) {
		mempool_free(rqd, rrpc->rq_pool);
		return NULL;
	}

	return rqd;
}

static void rrpc_inflight_laddr_release(struct rrpc *rrpc, struct nvm_rq *rqd)
{
	struct rrpc_inflight_rq *inf = rrpc_get_inflight_rq(rqd);

	rrpc_unlock_laddr(rrpc, inf);

	mempool_free(rqd, rrpc->rq_pool);
}

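/* Handle a discard bio: translate the bio into a logical sector range, take
 * the inflight lock over that range and invalidate the mapped pages so GC can
 * reclaim them later.
 */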
static void rrpc_discard(struct rrpc *rrpc, struct bio *bio)
{
	sector_t slba = bio->bi_iter.bi_sector / NR_PHY_IN_LOG;
	sector_t len = bio->bi_iter.bi_size / RRPC_EXPOSED_PAGE_SIZE;
	struct nvm_rq *rqd;

	do {
		rqd = rrpc_inflight_laddr_acquire(rrpc, slba, len);
		schedule();
	} while (!rqd);

	if (IS_ERR(rqd)) {
		pr_err("rrpc: unable to acquire inflight IO\n");
		bio_io_error(bio);
		return;
	}

	rrpc_invalidate_range(rrpc, slba, len);
	rrpc_inflight_laddr_release(rrpc, rqd);
}

static int block_is_full(struct rrpc *rrpc, struct rrpc_block *rblk)
{
	return (rblk->next_page == rrpc->dev->sec_per_blk);
}

/* Calculate relative addr for the given block, considering instantiated LUNs */
static u64 block_to_rel_addr(struct rrpc *rrpc, struct rrpc_block *rblk)
{
	struct nvm_block *blk = rblk->parent;
	int lun_blk = blk->id % (rrpc->dev->blks_per_lun * rrpc->nr_luns);

	return lun_blk * rrpc->dev->sec_per_blk;
}

/* Calculate global addr for the given block */
static u64 block_to_addr(struct rrpc *rrpc, struct rrpc_block *rblk)
{
	struct nvm_block *blk = rblk->parent;

	return blk->id * rrpc->dev->sec_per_blk;
}

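/* Unpack a linear (device-global) sector address into the generic ppa_addr
 * geometry by repeatedly dividing out sectors per page, pages per block,
 * blocks per LUN and LUNs per channel; the remainders become the sec, pg,
 * blk, lun and ch fields. As an illustration only, with a hypothetical
 * geometry of 4 sec/pg, 256 pg/blk, 1024 blk/lun and 4 lun/chnl, linear
 * address 5 would decompose to sec = 1, pg = 1, blk = 0, lun = 0, ch = 0.
 */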
static struct ppa_addr linear_to_generic_addr(struct nvm_dev *dev,
							struct ppa_addr r)
{
	struct ppa_addr l;
	int secs, pgs, blks, luns;
	sector_t ppa = r.ppa;

	l.ppa = 0;

	div_u64_rem(ppa, dev->sec_per_pg, &secs);
	l.g.sec = secs;
	sector_div(ppa, dev->sec_per_pg);
	div_u64_rem(ppa, dev->pgs_per_blk, &pgs);
	l.g.pg = pgs;
	sector_div(ppa, dev->pgs_per_blk);
	div_u64_rem(ppa, dev->blks_per_lun, &blks);
	l.g.blk = blks;
	sector_div(ppa, dev->blks_per_lun);
	div_u64_rem(ppa, dev->luns_per_chnl, &luns);
	l.g.lun = luns;
	sector_div(ppa, dev->luns_per_chnl);
	l.g.ch = ppa;

	return l;
}

static struct ppa_addr rrpc_ppa_to_gaddr(struct nvm_dev *dev, u64 addr)
{
	struct ppa_addr paddr;

	paddr.ppa = addr;
	return linear_to_generic_addr(dev, paddr);
}

/* requires lun->lock taken */
static void rrpc_set_lun_cur(struct rrpc_lun *rlun, struct rrpc_block *rblk)
{
	struct rrpc *rrpc = rlun->rrpc;

	BUG_ON(!rblk);

	if (rlun->cur) {
		spin_lock(&rlun->cur->lock);
		WARN_ON(!block_is_full(rrpc, rlun->cur));
		spin_unlock(&rlun->cur->lock);
	}
	rlun->cur = rblk;
}

static struct rrpc_block *rrpc_get_blk(struct rrpc *rrpc, struct rrpc_lun *rlun,
							unsigned long flags)
{
	struct nvm_lun *lun = rlun->parent;
	struct nvm_block *blk;
	struct rrpc_block *rblk;

	spin_lock(&lun->lock);
	blk = nvm_get_blk_unlocked(rrpc->dev, rlun->parent, flags);
	if (!blk) {
		pr_err("nvm: rrpc: cannot get new block from media manager\n");
		spin_unlock(&lun->lock);
		return NULL;
	}

	rblk = rrpc_get_rblk(rlun, blk->id);
	list_add_tail(&rblk->list, &rlun->open_list);
	spin_unlock(&lun->lock);

	bitmap_zero(rblk->invalid_pages, rrpc->dev->sec_per_blk);
	rblk->next_page = 0;
	rblk->nr_invalid_pages = 0;
	atomic_set(&rblk->data_cmnt_size, 0);

	return rblk;
}

static void rrpc_put_blk(struct rrpc *rrpc, struct rrpc_block *rblk)
{
	struct rrpc_lun *rlun = rblk->rlun;
	struct nvm_lun *lun = rlun->parent;

	spin_lock(&lun->lock);
	nvm_put_blk_unlocked(rrpc->dev, rblk->parent);
	list_del(&rblk->list);
	spin_unlock(&lun->lock);
}

static void rrpc_put_blks(struct rrpc *rrpc)
{
	struct rrpc_lun *rlun;
	int i;

	for (i = 0; i < rrpc->nr_luns; i++) {
		rlun = &rrpc->luns[i];
		if (rlun->cur)
			rrpc_put_blk(rrpc, rlun->cur);
		if (rlun->gc_cur)
			rrpc_put_blk(rrpc, rlun->gc_cur);
	}
}

static struct rrpc_lun *get_next_lun(struct rrpc *rrpc)
{
	int next = atomic_inc_return(&rrpc->next_lun);

	return &rrpc->luns[next % rrpc->nr_luns];
}

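/* Kick garbage collection: queue the per-LUN GC work item for every LUN on
 * the rrpc-lun workqueue.
 */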
static void rrpc_gc_kick(struct rrpc *rrpc)
{
	struct rrpc_lun *rlun;
	unsigned int i;

	for (i = 0; i < rrpc->nr_luns; i++) {
		rlun = &rrpc->luns[i];
		queue_work(rrpc->krqd_wq, &rlun->ws_gc);
	}
}

/*
 * timed GC every interval.
 */
static void rrpc_gc_timer(unsigned long data)
{
	struct rrpc *rrpc = (struct rrpc *)data;

	rrpc_gc_kick(rrpc);
	mod_timer(&rrpc->gc_timer, jiffies + msecs_to_jiffies(10));
}

static void rrpc_end_sync_bio(struct bio *bio)
{
	struct completion *waiting = bio->bi_private;

	if (bio->bi_error)
		pr_err("nvm: gc request failed (%u).\n", bio->bi_error);

	complete(waiting);
}

/**
 * rrpc_move_valid_pages -- migrate live data off the block
 * @rrpc: the 'rrpc' structure
 * @block: the block from which to migrate live pages
 *
 * Description:
 *   GC algorithms may call this function to migrate remaining live
 *   pages off the block prior to erasing it. This function blocks
 *   further execution until the operation is complete.
 */
static int rrpc_move_valid_pages(struct rrpc *rrpc, struct rrpc_block *rblk)
{
	struct request_queue *q = rrpc->dev->q;
	struct rrpc_rev_addr *rev;
	struct nvm_rq *rqd;
	struct bio *bio;
	struct page *page;
	int slot;
	int nr_sec_per_blk = rrpc->dev->sec_per_blk;
	u64 phys_addr;
	DECLARE_COMPLETION_ONSTACK(wait);

	if (bitmap_full(rblk->invalid_pages, nr_sec_per_blk))
		return 0;

	bio = bio_alloc(GFP_NOIO, 1);
	if (!bio) {
		pr_err("nvm: could not alloc bio to gc\n");
		return -ENOMEM;
	}

	page = mempool_alloc(rrpc->page_pool, GFP_NOIO);
	if (!page) {
		bio_put(bio);
		return -ENOMEM;
	}

	while ((slot = find_first_zero_bit(rblk->invalid_pages,
					nr_sec_per_blk)) < nr_sec_per_blk) {
		/* Lock laddr */
		phys_addr = rblk->parent->id * nr_sec_per_blk + slot;
try:
		spin_lock(&rrpc->rev_lock);
		/* Get logical address from physical to logical table */
		rev = &rrpc->rev_trans_map[phys_addr - rrpc->poffset];
		/* already updated by previous regular write */
		if (rev->addr == ADDR_EMPTY) {
			spin_unlock(&rrpc->rev_lock);
			continue;
		}

		rqd = rrpc_inflight_laddr_acquire(rrpc, rev->addr, 1);
		if (IS_ERR_OR_NULL(rqd)) {
			spin_unlock(&rrpc->rev_lock);
			schedule();
			goto try;
		}
		spin_unlock(&rrpc->rev_lock);

		/* Perform read to do GC */
		bio->bi_iter.bi_sector = rrpc_get_sector(rev->addr);
		bio->bi_private = &wait;
		bio->bi_end_io = rrpc_end_sync_bio;

		/* TODO: may fail when EXP_PG_SIZE > PAGE_SIZE */
		bio_add_pc_page(q, bio, page, RRPC_EXPOSED_PAGE_SIZE, 0);

		if (rrpc_submit_io(rrpc, bio, rqd, NVM_IOTYPE_GC)) {
			pr_err("rrpc: gc read failed.\n");
			rrpc_inflight_laddr_release(rrpc, rqd);
			goto finished;
		}
		wait_for_completion_io(&wait);
		if (bio->bi_error) {
			rrpc_inflight_laddr_release(rrpc, rqd);
			goto finished;
		}

		bio_reset(bio);
		reinit_completion(&wait);

		bio->bi_iter.bi_sector = rrpc_get_sector(rev->addr);
		bio->bi_private = &wait;
		bio->bi_end_io = rrpc_end_sync_bio;

		bio_add_pc_page(q, bio, page, RRPC_EXPOSED_PAGE_SIZE, 0);

		/* turn the command around and write the data back to a new
		 * address
		 */
		if (rrpc_submit_io(rrpc, bio, rqd, NVM_IOTYPE_GC)) {
			pr_err("rrpc: gc write failed.\n");
			rrpc_inflight_laddr_release(rrpc, rqd);
			goto finished;
		}
		wait_for_completion_io(&wait);

		rrpc_inflight_laddr_release(rrpc, rqd);
		if (bio->bi_error)
			goto finished;

		bio_reset(bio);
	}

finished:
	mempool_free(page, rrpc->page_pool);
	bio_put(bio);

	if (!bitmap_full(rblk->invalid_pages, nr_sec_per_blk)) {
		pr_err("nvm: failed to garbage collect block\n");
		return -EIO;
	}

	return 0;
}

static void rrpc_block_gc(struct work_struct *work)
{
	struct rrpc_block_gc *gcb = container_of(work, struct rrpc_block_gc,
									ws_gc);
	struct rrpc *rrpc = gcb->rrpc;
	struct rrpc_block *rblk = gcb->rblk;
	struct rrpc_lun *rlun = rblk->rlun;
	struct nvm_dev *dev = rrpc->dev;

	mempool_free(gcb, rrpc->gcb_pool);
	pr_debug("nvm: block '%lu' being reclaimed\n", rblk->parent->id);

	if (rrpc_move_valid_pages(rrpc, rblk))
		goto put_back;

	if (nvm_erase_blk(dev, rblk->parent))
		goto put_back;

	rrpc_put_blk(rrpc, rblk);

	return;

put_back:
	spin_lock(&rlun->lock);
	list_add_tail(&rblk->prio, &rlun->prio_list);
	spin_unlock(&rlun->lock);
}

/* the block with highest number of invalid pages, will be in the beginning
 * of the list
 */
static struct rrpc_block *rblock_max_invalid(struct rrpc_block *ra,
							struct rrpc_block *rb)
{
	if (ra->nr_invalid_pages == rb->nr_invalid_pages)
		return ra;

	return (ra->nr_invalid_pages < rb->nr_invalid_pages) ? rb : ra;
}

/* linearly find the block with highest number of invalid pages
 * requires lun->lock
 */
static struct rrpc_block *block_prio_find_max(struct rrpc_lun *rlun)
{
	struct list_head *prio_list = &rlun->prio_list;
	struct rrpc_block *rblock, *max;

	BUG_ON(list_empty(prio_list));

	max = list_first_entry(prio_list, struct rrpc_block, prio);
	list_for_each_entry(rblock, prio_list, prio)
		max = rblock_max_invalid(max, rblock);

	return max;
}

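/* Per-LUN GC worker. When the LUN runs low on free blocks (fewer than
 * blks_per_lun / GC_LIMIT_INVERSE, but at least one per LUN), repeatedly pick
 * the fully written block with the most invalid pages from the prio list and
 * queue it for reclaim on the background workqueue.
 */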
static void rrpc_lun_gc(struct work_struct *work)
{
	struct rrpc_lun *rlun = container_of(work, struct rrpc_lun, ws_gc);
	struct rrpc *rrpc = rlun->rrpc;
	struct nvm_lun *lun = rlun->parent;
	struct rrpc_block_gc *gcb;
	unsigned int nr_blocks_need;

	nr_blocks_need = rrpc->dev->blks_per_lun / GC_LIMIT_INVERSE;

	if (nr_blocks_need < rrpc->nr_luns)
		nr_blocks_need = rrpc->nr_luns;

	spin_lock(&rlun->lock);
	while (nr_blocks_need > lun->nr_free_blocks &&
					!list_empty(&rlun->prio_list)) {
		struct rrpc_block *rblock = block_prio_find_max(rlun);
		struct nvm_block *block = rblock->parent;

		if (!rblock->nr_invalid_pages)
			break;

		gcb = mempool_alloc(rrpc->gcb_pool, GFP_ATOMIC);
		if (!gcb)
			break;

		list_del_init(&rblock->prio);

		BUG_ON(!block_is_full(rrpc, rblock));

		pr_debug("rrpc: selected block '%lu' for GC\n", block->id);

		gcb->rrpc = rrpc;
		gcb->rblk = rblock;
		INIT_WORK(&gcb->ws_gc, rrpc_block_gc);

		queue_work(rrpc->kgc_wq, &gcb->ws_gc);

		nr_blocks_need--;
	}
	spin_unlock(&rlun->lock);

	/* TODO: Hint that request queue can be started again */
}

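/* Deferred work run once a block has been completely written: move it from
 * the LUN's open list to its closed list, update the open/closed counters and
 * block state, and put it on the prio list so the GC worker can consider it.
 */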
static void rrpc_gc_queue(struct work_struct *work)
{
	struct rrpc_block_gc *gcb = container_of(work, struct rrpc_block_gc,
									ws_gc);
	struct rrpc *rrpc = gcb->rrpc;
	struct rrpc_block *rblk = gcb->rblk;
	struct rrpc_lun *rlun = rblk->rlun;
	struct nvm_lun *lun = rblk->parent->lun;
	struct nvm_block *blk = rblk->parent;

	spin_lock(&rlun->lock);
	list_add_tail(&rblk->prio, &rlun->prio_list);
	spin_unlock(&rlun->lock);

	spin_lock(&lun->lock);
	lun->nr_open_blocks--;
	lun->nr_closed_blocks++;
	blk->state &= ~NVM_BLK_ST_OPEN;
	blk->state |= NVM_BLK_ST_CLOSED;
	list_move_tail(&rblk->list, &rlun->closed_list);
	spin_unlock(&lun->lock);

	mempool_free(gcb, rrpc->gcb_pool);
	pr_debug("nvm: block '%lu' is full, allow GC (sched)\n",
							rblk->parent->id);
}

static const struct block_device_operations rrpc_fops = {
	.owner		= THIS_MODULE,
};

static struct rrpc_lun *rrpc_get_lun_rr(struct rrpc *rrpc, int is_gc)
{
	unsigned int i;
	struct rrpc_lun *rlun, *max_free;

	if (!is_gc)
		return get_next_lun(rrpc);

	/* during GC, we don't care about RR, instead we want to make
	 * sure that we maintain evenness between the block luns.
	 */
	max_free = &rrpc->luns[0];
	/* prevent GC-ing lun from devouring pages of a lun with
	 * little free blocks. We don't take the lock as we only need an
	 * estimate.
	 */
	rrpc_for_each_lun(rrpc, rlun, i) {
		if (rlun->parent->nr_free_blocks >
					max_free->parent->nr_free_blocks)
			max_free = rlun;
	}

	return max_free;
}

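/* Install the mapping laddr -> (rblk, paddr) in the forward translation table
 * and record the matching entry in the reverse table, invalidating any page
 * the logical address previously pointed to.
 */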
static struct rrpc_addr *rrpc_update_map(struct rrpc *rrpc, sector_t laddr,
					struct rrpc_block *rblk, u64 paddr)
{
	struct rrpc_addr *gp;
	struct rrpc_rev_addr *rev;

	BUG_ON(laddr >= rrpc->nr_sects);

	gp = &rrpc->trans_map[laddr];
	spin_lock(&rrpc->rev_lock);
	if (gp->rblk)
		rrpc_page_invalidate(rrpc, gp);

	gp->addr = paddr;
	gp->rblk = rblk;

	rev = &rrpc->rev_trans_map[gp->addr - rrpc->poffset];
	rev->addr = laddr;
	spin_unlock(&rrpc->rev_lock);

	return gp;
}

static u64 rrpc_alloc_addr(struct rrpc *rrpc, struct rrpc_block *rblk)
{
	u64 addr = ADDR_EMPTY;

	spin_lock(&rblk->lock);
	if (block_is_full(rrpc, rblk))
		goto out;

	addr = block_to_addr(rrpc, rblk) + rblk->next_page;
	rblk->next_page++;
out:
	spin_unlock(&rblk->lock);
	return addr;
}

/* Simple round-robin Logical to physical address translation.
 *
 * Retrieve the mapping using the active append point. Then update the ap for
 * the next write to the disk.
 *
 * Returns rrpc_addr with the physical address and block. Remember to return to
 * rrpc->addr_cache when request is finished.
 */
static struct rrpc_addr *rrpc_map_page(struct rrpc *rrpc, sector_t laddr,
								int is_gc)
{
	struct rrpc_lun *rlun;
	struct rrpc_block *rblk;
	struct nvm_lun *lun;
	u64 paddr;

	rlun = rrpc_get_lun_rr(rrpc, is_gc);
	lun = rlun->parent;

	if (!is_gc && lun->nr_free_blocks < rrpc->nr_luns * 4)
		return NULL;

	spin_lock(&rlun->lock);

	rblk = rlun->cur;
retry:
	paddr = rrpc_alloc_addr(rrpc, rblk);

	if (paddr == ADDR_EMPTY) {
		rblk = rrpc_get_blk(rrpc, rlun, 0);
		if (rblk) {
			rrpc_set_lun_cur(rlun, rblk);
			goto retry;
		}

		if (is_gc) {
			/* retry from emergency gc block */
			paddr = rrpc_alloc_addr(rrpc, rlun->gc_cur);
			if (paddr == ADDR_EMPTY) {
				rblk = rrpc_get_blk(rrpc, rlun, 1);
				if (!rblk) {
					pr_err("rrpc: no more blocks");
					goto err;
				}

				rlun->gc_cur = rblk;
				paddr = rrpc_alloc_addr(rrpc, rlun->gc_cur);
			}
			rblk = rlun->gc_cur;
		}
	}

	spin_unlock(&rlun->lock);
	return rrpc_update_map(rrpc, laddr, rblk, paddr);
err:
	spin_unlock(&rlun->lock);
	return NULL;
}

static void rrpc_run_gc(struct rrpc *rrpc, struct rrpc_block *rblk)
{
	struct rrpc_block_gc *gcb;

	gcb = mempool_alloc(rrpc->gcb_pool, GFP_ATOMIC);
	if (!gcb) {
		pr_err("rrpc: unable to queue block for gc.");
		return;
	}

	gcb->rrpc = rrpc;
	gcb->rblk = rblk;
	INIT_WORK(&gcb->ws_gc, rrpc_gc_queue);
	queue_work(rrpc->kgc_wq, &gcb->ws_gc);
}

static void rrpc_end_io_write(struct rrpc *rrpc, struct rrpc_rq *rrqd,
						sector_t laddr, uint8_t npages)
{
	struct rrpc_addr *p;
	struct rrpc_block *rblk;
	struct nvm_lun *lun;
	int cmnt_size, i;

	for (i = 0; i < npages; i++) {
		p = &rrpc->trans_map[laddr + i];
		rblk = p->rblk;
		lun = rblk->parent->lun;

		cmnt_size = atomic_inc_return(&rblk->data_cmnt_size);
		if (unlikely(cmnt_size == rrpc->dev->sec_per_blk))
			rrpc_run_gc(rrpc, rblk);
	}
}

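/* Completion path for a target I/O. For writes, account the completed sectors
 * per block and schedule GC once a block is fully written; then release the
 * inflight lock, any ppa list DMA memory and the request itself (GC requests
 * are freed by their submitter).
 */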
static void rrpc_end_io(struct nvm_rq *rqd)
{
	struct rrpc *rrpc = container_of(rqd->ins, struct rrpc, instance);
	struct rrpc_rq *rrqd = nvm_rq_to_pdu(rqd);
	uint8_t npages = rqd->nr_ppas;
	sector_t laddr = rrpc_get_laddr(rqd->bio) - npages;

	if (bio_data_dir(rqd->bio) == WRITE)
		rrpc_end_io_write(rrpc, rrqd, laddr, npages);

	bio_put(rqd->bio);

	if (rrqd->flags & NVM_IOTYPE_GC)
		return;

	rrpc_unlock_rq(rrpc, rqd);

	if (npages > 1)
		nvm_dev_dma_free(rrpc->dev, rqd->ppa_list, rqd->dma_ppa_list);

	mempool_free(rqd, rrpc->rq_pool);
}

static int rrpc_read_ppalist_rq(struct rrpc *rrpc, struct bio *bio,
			struct nvm_rq *rqd, unsigned long flags, int npages)
{
	struct rrpc_inflight_rq *r = rrpc_get_inflight_rq(rqd);
	struct rrpc_addr *gp;
	sector_t laddr = rrpc_get_laddr(bio);
	int is_gc = flags & NVM_IOTYPE_GC;
	int i;

	if (!is_gc && rrpc_lock_rq(rrpc, bio, rqd)) {
		nvm_dev_dma_free(rrpc->dev, rqd->ppa_list, rqd->dma_ppa_list);
		return NVM_IO_REQUEUE;
	}

	for (i = 0; i < npages; i++) {
		/* We assume that mapping occurs at 4KB granularity */
		BUG_ON(!(laddr + i >= 0 && laddr + i < rrpc->nr_sects));
		gp = &rrpc->trans_map[laddr + i];

		if (gp->rblk) {
			rqd->ppa_list[i] = rrpc_ppa_to_gaddr(rrpc->dev,
								gp->addr);
		} else {
			BUG_ON(is_gc);
			rrpc_unlock_laddr(rrpc, r);
			nvm_dev_dma_free(rrpc->dev, rqd->ppa_list,
							rqd->dma_ppa_list);
			return NVM_IO_DONE;
		}
	}

	rqd->opcode = NVM_OP_HBREAD;

	return NVM_IO_OK;
}

static int rrpc_read_rq(struct rrpc *rrpc, struct bio *bio, struct nvm_rq *rqd,
							unsigned long flags)
{
	struct rrpc_rq *rrqd = nvm_rq_to_pdu(rqd);
	int is_gc = flags & NVM_IOTYPE_GC;
	sector_t laddr = rrpc_get_laddr(bio);
	struct rrpc_addr *gp;

	if (!is_gc && rrpc_lock_rq(rrpc, bio, rqd))
		return NVM_IO_REQUEUE;

	BUG_ON(!(laddr >= 0 && laddr < rrpc->nr_sects));
	gp = &rrpc->trans_map[laddr];

	if (gp->rblk) {
		rqd->ppa_addr = rrpc_ppa_to_gaddr(rrpc->dev, gp->addr);
	} else {
		BUG_ON(is_gc);
		rrpc_unlock_rq(rrpc, rqd);
		return NVM_IO_DONE;
	}

	rqd->opcode = NVM_OP_HBREAD;
	rrqd->addr = gp;

	return NVM_IO_OK;
}

static int rrpc_write_ppalist_rq(struct rrpc *rrpc, struct bio *bio,
			struct nvm_rq *rqd, unsigned long flags, int npages)
{
	struct rrpc_inflight_rq *r = rrpc_get_inflight_rq(rqd);
	struct rrpc_addr *p;
	sector_t laddr = rrpc_get_laddr(bio);
	int is_gc = flags & NVM_IOTYPE_GC;
	int i;

	if (!is_gc && rrpc_lock_rq(rrpc, bio, rqd)) {
		nvm_dev_dma_free(rrpc->dev, rqd->ppa_list, rqd->dma_ppa_list);
		return NVM_IO_REQUEUE;
	}

	for (i = 0; i < npages; i++) {
		/* We assume that mapping occurs at 4KB granularity */
		p = rrpc_map_page(rrpc, laddr + i, is_gc);
		if (!p) {
			BUG_ON(is_gc);
			rrpc_unlock_laddr(rrpc, r);
			nvm_dev_dma_free(rrpc->dev, rqd->ppa_list,
							rqd->dma_ppa_list);
			rrpc_gc_kick(rrpc);
			return NVM_IO_REQUEUE;
		}

		rqd->ppa_list[i] = rrpc_ppa_to_gaddr(rrpc->dev,
								p->addr);
	}

	rqd->opcode = NVM_OP_HBWRITE;

	return NVM_IO_OK;
}

static int rrpc_write_rq(struct rrpc *rrpc, struct bio *bio,
				struct nvm_rq *rqd, unsigned long flags)
{
	struct rrpc_rq *rrqd = nvm_rq_to_pdu(rqd);
	struct rrpc_addr *p;
	int is_gc = flags & NVM_IOTYPE_GC;
	sector_t laddr = rrpc_get_laddr(bio);

	if (!is_gc && rrpc_lock_rq(rrpc, bio, rqd))
		return NVM_IO_REQUEUE;

	p = rrpc_map_page(rrpc, laddr, is_gc);
	if (!p) {
		BUG_ON(is_gc);
		rrpc_unlock_rq(rrpc, rqd);
		rrpc_gc_kick(rrpc);
		return NVM_IO_REQUEUE;
	}

	rqd->ppa_addr = rrpc_ppa_to_gaddr(rrpc->dev, p->addr);
	rqd->opcode = NVM_OP_HBWRITE;
	rrqd->addr = p;

	return NVM_IO_OK;
}

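/* Prepare the nvm_rq for submission: for multi-page requests allocate a ppa
 * list from the device DMA pool and map each 4KB page, otherwise map the
 * single ppa directly. Dispatches to the read or write mapping helpers based
 * on the bio direction.
 */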
static int rrpc_setup_rq(struct rrpc *rrpc, struct bio *bio,
			struct nvm_rq *rqd, unsigned long flags, uint8_t npages)
{
	if (npages > 1) {
		rqd->ppa_list = nvm_dev_dma_alloc(rrpc->dev, GFP_KERNEL,
							&rqd->dma_ppa_list);
		if (!rqd->ppa_list) {
			pr_err("rrpc: not able to allocate ppa list\n");
			return NVM_IO_ERR;
		}

		if (bio_rw(bio) == WRITE)
			return rrpc_write_ppalist_rq(rrpc, bio, rqd, flags,
									npages);

		return rrpc_read_ppalist_rq(rrpc, bio, rqd, flags, npages);
	}

	if (bio_rw(bio) == WRITE)
		return rrpc_write_rq(rrpc, bio, rqd, flags);

	return rrpc_read_rq(rrpc, bio, rqd, flags);
}

static int rrpc_submit_io(struct rrpc *rrpc, struct bio *bio,
				struct nvm_rq *rqd, unsigned long flags)
{
	int err;
	struct rrpc_rq *rrq = nvm_rq_to_pdu(rqd);
	uint8_t nr_pages = rrpc_get_pages(bio);
	int bio_size = bio_sectors(bio) << 9;

	if (bio_size < rrpc->dev->sec_size)
		return NVM_IO_ERR;
	else if (bio_size > rrpc->dev->max_rq_size)
		return NVM_IO_ERR;

	err = rrpc_setup_rq(rrpc, bio, rqd, flags, nr_pages);
	if (err)
		return err;

	bio_get(bio);
	rqd->bio = bio;
	rqd->ins = &rrpc->instance;
	rqd->nr_ppas = nr_pages;
	rrq->flags = flags;

	err = nvm_submit_io(rrpc->dev, rqd);
	if (err) {
		pr_err("rrpc: I/O submission failed: %d\n", err);
		bio_put(bio);
		if (!(flags & NVM_IOTYPE_GC)) {
			rrpc_unlock_rq(rrpc, rqd);
			if (rqd->nr_ppas > 1)
				nvm_dev_dma_free(rrpc->dev,
					rqd->ppa_list, rqd->dma_ppa_list);
		}
		return NVM_IO_ERR;
	}

	return NVM_IO_OK;
}

static blk_qc_t rrpc_make_rq(struct request_queue *q, struct bio *bio)
{
	struct rrpc *rrpc = q->queuedata;
	struct nvm_rq *rqd;
	int err;

	if (bio->bi_rw & REQ_DISCARD) {
		rrpc_discard(rrpc, bio);
		return BLK_QC_T_NONE;
	}

	rqd = mempool_alloc(rrpc->rq_pool, GFP_KERNEL);
	if (!rqd) {
		pr_err_ratelimited("rrpc: not able to queue bio.");
		bio_io_error(bio);
		return BLK_QC_T_NONE;
	}
	memset(rqd, 0, sizeof(struct nvm_rq));

	err = rrpc_submit_io(rrpc, bio, rqd, NVM_IOTYPE_NONE);
	switch (err) {
	case NVM_IO_OK:
		return BLK_QC_T_NONE;
	case NVM_IO_ERR:
		bio_io_error(bio);
		break;
	case NVM_IO_DONE:
		bio_endio(bio);
		break;
	case NVM_IO_REQUEUE:
		spin_lock(&rrpc->bio_lock);
		bio_list_add(&rrpc->requeue_bios, bio);
		spin_unlock(&rrpc->bio_lock);
		queue_work(rrpc->kgc_wq, &rrpc->ws_requeue);
		break;
	}

	mempool_free(rqd, rrpc->rq_pool);
	return BLK_QC_T_NONE;
}

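/* Requeue worker: drain the bios parked on rrpc->requeue_bios (requests that
 * previously returned NVM_IO_REQUEUE) and resubmit them through
 * rrpc_make_rq().
 */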
static void rrpc_requeue(struct work_struct *work)
{
	struct rrpc *rrpc = container_of(work, struct rrpc, ws_requeue);
	struct bio_list bios;
	struct bio *bio;

	bio_list_init(&bios);

	spin_lock(&rrpc->bio_lock);
	bio_list_merge(&bios, &rrpc->requeue_bios);
	bio_list_init(&rrpc->requeue_bios);
	spin_unlock(&rrpc->bio_lock);

	while ((bio = bio_list_pop(&bios)))
		rrpc_make_rq(rrpc->disk->queue, bio);
}

static void rrpc_gc_free(struct rrpc *rrpc)
{
	if (rrpc->krqd_wq)
		destroy_workqueue(rrpc->krqd_wq);

	if (rrpc->kgc_wq)
		destroy_workqueue(rrpc->kgc_wq);
}

static int rrpc_gc_init(struct rrpc *rrpc)
{
	rrpc->krqd_wq = alloc_workqueue("rrpc-lun", WQ_MEM_RECLAIM|WQ_UNBOUND,
								rrpc->nr_luns);
	if (!rrpc->krqd_wq)
		return -ENOMEM;

	rrpc->kgc_wq = alloc_workqueue("rrpc-bg", WQ_MEM_RECLAIM, 1);
	if (!rrpc->kgc_wq)
		return -ENOMEM;

	setup_timer(&rrpc->gc_timer, rrpc_gc_timer, (unsigned long)rrpc);

	return 0;
}

static void rrpc_map_free(struct rrpc *rrpc)
{
	vfree(rrpc->rev_trans_map);
	vfree(rrpc->trans_map);
}

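/* Callback used when reading the device's L2P table at init time: for each
 * entry, validate it against the device's total sector count and populate the
 * forward and reverse translation maps. A pba of zero is skipped, as the
 * first page commonly holds device boot information (see comment below).
 */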
static int rrpc_l2p_update(u64 slba, u32 nlb, __le64 *entries, void *private)
{
	struct rrpc *rrpc = (struct rrpc *)private;
	struct nvm_dev *dev = rrpc->dev;
	struct rrpc_addr *addr = rrpc->trans_map + slba;
	struct rrpc_rev_addr *raddr = rrpc->rev_trans_map;
	u64 elba = slba + nlb;
	u64 i;

	if (unlikely(elba > dev->total_secs)) {
		pr_err("nvm: L2P data from device is out of bounds!\n");
		return -EINVAL;
	}

	for (i = 0; i < nlb; i++) {
		u64 pba = le64_to_cpu(entries[i]);
		unsigned int mod;

		/* LNVM treats address-spaces as silos, LBA and PBA are
		 * equally large and zero-indexed.
		 */
		if (unlikely(pba >= dev->total_secs && pba != U64_MAX)) {
			pr_err("nvm: L2P data entry is out of bounds!\n");
			return -EINVAL;
		}

		/* Address zero is a special one. The first page on a disk is
		 * protected. As it often holds internal device boot
		 * information.
		 */
		if (!pba)
			continue;

		div_u64_rem(pba, rrpc->nr_sects, &mod);

		addr[i].addr = pba;
		raddr[mod].addr = slba + i;
	}

	return 0;
}

static int rrpc_map_init(struct rrpc *rrpc)
{
	struct nvm_dev *dev = rrpc->dev;
	sector_t i;
	int ret;

	rrpc->trans_map = vzalloc(sizeof(struct rrpc_addr) * rrpc->nr_sects);
	if (!rrpc->trans_map)
		return -ENOMEM;

	rrpc->rev_trans_map = vmalloc(sizeof(struct rrpc_rev_addr)
							* rrpc->nr_sects);
	if (!rrpc->rev_trans_map)
		return -ENOMEM;

	for (i = 0; i < rrpc->nr_sects; i++) {
		struct rrpc_addr *p = &rrpc->trans_map[i];
		struct rrpc_rev_addr *r = &rrpc->rev_trans_map[i];

		p->addr = ADDR_EMPTY;
		r->addr = ADDR_EMPTY;
	}

	if (!dev->ops->get_l2p_tbl)
		return 0;

	/* Bring up the mapping table from device */
	ret = dev->ops->get_l2p_tbl(dev, rrpc->soffset, rrpc->nr_sects,
					rrpc_l2p_update, rrpc);
	if (ret) {
		pr_err("nvm: rrpc: could not read L2P table.\n");
		return -EINVAL;
	}

	return 0;
}

/* Minimum pages needed within a lun */
#define PAGE_POOL_SIZE 16
#define ADDR_POOL_SIZE 64

static int rrpc_core_init(struct rrpc *rrpc)
{
	down_write(&rrpc_lock);
	if (!rrpc_gcb_cache) {
		rrpc_gcb_cache = kmem_cache_create("rrpc_gcb",
				sizeof(struct rrpc_block_gc), 0, 0, NULL);
		if (!rrpc_gcb_cache) {
			up_write(&rrpc_lock);
			return -ENOMEM;
		}

		rrpc_rq_cache = kmem_cache_create("rrpc_rq",
				sizeof(struct nvm_rq) + sizeof(struct rrpc_rq),
				0, 0, NULL);
		if (!rrpc_rq_cache) {
			kmem_cache_destroy(rrpc_gcb_cache);
			up_write(&rrpc_lock);
			return -ENOMEM;
		}
	}
	up_write(&rrpc_lock);

	rrpc->page_pool = mempool_create_page_pool(PAGE_POOL_SIZE, 0);
	if (!rrpc->page_pool)
		return -ENOMEM;

	rrpc->gcb_pool = mempool_create_slab_pool(rrpc->dev->nr_luns,
								rrpc_gcb_cache);
	if (!rrpc->gcb_pool)
		return -ENOMEM;

	rrpc->rq_pool = mempool_create_slab_pool(64, rrpc_rq_cache);
	if (!rrpc->rq_pool)
		return -ENOMEM;

	spin_lock_init(&rrpc->inflights.lock);
	INIT_LIST_HEAD(&rrpc->inflights.reqs);

	return 0;
}

static void rrpc_core_free(struct rrpc *rrpc)
{
	mempool_destroy(rrpc->page_pool);
	mempool_destroy(rrpc->gcb_pool);
	mempool_destroy(rrpc->rq_pool);
}

static void rrpc_luns_free(struct rrpc *rrpc)
{
	struct nvm_dev *dev = rrpc->dev;
	struct nvm_lun *lun;
	struct rrpc_lun *rlun;
	int i;

	if (!rrpc->luns)
		return;

	for (i = 0; i < rrpc->nr_luns; i++) {
		rlun = &rrpc->luns[i];
		lun = rlun->parent;
		if (!lun)
			break;
		dev->mt->release_lun(dev, lun->id);
		vfree(rlun->blocks);
	}

	kfree(rrpc->luns);
}

static int rrpc_luns_init(struct rrpc *rrpc, int lun_begin, int lun_end)
{
	struct nvm_dev *dev = rrpc->dev;
	struct rrpc_lun *rlun;
	int i, j, ret = -EINVAL;

	if (dev->sec_per_blk > MAX_INVALID_PAGES_STORAGE * BITS_PER_LONG) {
		pr_err("rrpc: number of pages per block too high.");
		return -EINVAL;
	}

	spin_lock_init(&rrpc->rev_lock);

	rrpc->luns = kcalloc(rrpc->nr_luns, sizeof(struct rrpc_lun),
								GFP_KERNEL);
	if (!rrpc->luns)
		return -ENOMEM;

	for (i = 0; i < rrpc->nr_luns; i++) {
		int lunid = lun_begin + i;
		struct nvm_lun *lun;

		if (dev->mt->reserve_lun(dev, lunid)) {
			pr_err("rrpc: lun %u is already allocated\n", lunid);
			goto err;
		}

		lun = dev->mt->get_lun(dev, lunid);
		if (!lun)
			goto err;

		rlun = &rrpc->luns[i];
		rlun->parent = lun;
		rlun->blocks = vzalloc(sizeof(struct rrpc_block) *
						rrpc->dev->blks_per_lun);
		if (!rlun->blocks) {
			ret = -ENOMEM;
			goto err;
		}

		for (j = 0; j < rrpc->dev->blks_per_lun; j++) {
			struct rrpc_block *rblk = &rlun->blocks[j];
			struct nvm_block *blk = &lun->blocks[j];

			rblk->parent = blk;
			rblk->rlun = rlun;
			INIT_LIST_HEAD(&rblk->prio);
			spin_lock_init(&rblk->lock);
		}

		rlun->rrpc = rrpc;
		INIT_LIST_HEAD(&rlun->prio_list);
		INIT_LIST_HEAD(&rlun->open_list);
		INIT_LIST_HEAD(&rlun->closed_list);

		INIT_WORK(&rlun->ws_gc, rrpc_lun_gc);
		spin_lock_init(&rlun->lock);
	}

	return 0;
err:
	return ret;
}

/* returns 0 on success and stores the beginning address in *begin */
static int rrpc_area_init(struct rrpc *rrpc, sector_t *begin)
{
	struct nvm_dev *dev = rrpc->dev;
	struct nvmm_type *mt = dev->mt;
	sector_t size = rrpc->nr_sects * dev->sec_size;
	int ret;

	size >>= 9;

	ret = mt->get_area(dev, begin, size);
	if (!ret)
		*begin >>= (ilog2(dev->sec_size) - 9);

	return ret;
}

static void rrpc_area_free(struct rrpc *rrpc)
{
	struct nvm_dev *dev = rrpc->dev;
	struct nvmm_type *mt = dev->mt;
	sector_t begin = rrpc->soffset << (ilog2(dev->sec_size) - 9);

	mt->put_area(dev, begin);
}

static void rrpc_free(struct rrpc *rrpc)
{
	rrpc_gc_free(rrpc);
	rrpc_map_free(rrpc);
	rrpc_core_free(rrpc);
	rrpc_luns_free(rrpc);
	rrpc_area_free(rrpc);

	kfree(rrpc);
}

static void rrpc_exit(void *private)
{
	struct rrpc *rrpc = private;

	del_timer(&rrpc->gc_timer);

	flush_workqueue(rrpc->krqd_wq);
	flush_workqueue(rrpc->kgc_wq);

	rrpc_free(rrpc);
}

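/* Report the usable capacity exposed by the target. Four blocks per LUN are
 * held back (current and GC append points plus two emergency blocks) and a
 * further 10% of the remaining sectors is kept as over-provisioning, so the
 * exposed size is roughly 90% of the non-reserved space.
 */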
static sector_t rrpc_capacity(void *private)
{
	struct rrpc *rrpc = private;
	struct nvm_dev *dev = rrpc->dev;
	sector_t reserved, provisioned;

	/* cur, gc, and two emergency blocks for each lun */
	reserved = rrpc->nr_luns * dev->sec_per_blk * 4;
	provisioned = rrpc->nr_sects - reserved;

	if (reserved > rrpc->nr_sects) {
		pr_err("rrpc: not enough space available to expose storage.\n");
		return 0;
	}

	sector_div(provisioned, 10);
	return provisioned * 9 * NR_PHY_IN_LOG;
}

/*
 * Looks up the logical address from the reverse trans map and checks whether
 * it is still valid by comparing the logical-to-physical mapping with the
 * physical address. Pages whose forward mapping no longer points back at them
 * are marked invalid in the block's invalid_pages bitmap.
 */
static void rrpc_block_map_update(struct rrpc *rrpc, struct rrpc_block *rblk)
{
	struct nvm_dev *dev = rrpc->dev;
	int offset;
	struct rrpc_addr *laddr;
	u64 bpaddr, paddr, pladdr;

	bpaddr = block_to_rel_addr(rrpc, rblk);
	for (offset = 0; offset < dev->sec_per_blk; offset++) {
		paddr = bpaddr + offset;

		pladdr = rrpc->rev_trans_map[paddr].addr;
		if (pladdr == ADDR_EMPTY)
			continue;

		laddr = &rrpc->trans_map[pladdr];

		if (paddr == laddr->addr) {
			laddr->rblk = rblk;
		} else {
			set_bit(offset, rblk->invalid_pages);
			rblk->nr_invalid_pages++;
		}
	}
}

static int rrpc_blocks_init(struct rrpc *rrpc)
{
	struct rrpc_lun *rlun;
	struct rrpc_block *rblk;
	int lun_iter, blk_iter;

	for (lun_iter = 0; lun_iter < rrpc->nr_luns; lun_iter++) {
		rlun = &rrpc->luns[lun_iter];

		for (blk_iter = 0; blk_iter < rrpc->dev->blks_per_lun;
								blk_iter++) {
			rblk = &rlun->blocks[blk_iter];
			rrpc_block_map_update(rrpc, rblk);
		}
	}

	return 0;
}

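/* Reserve the initial append points for every LUN: one current write block
 * and one emergency GC block. If any LUN cannot supply both, all blocks taken
 * so far are returned to the media manager.
 */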
static int rrpc_luns_configure(struct rrpc *rrpc)
{
	struct rrpc_lun *rlun;
	struct rrpc_block *rblk;
	int i;

	for (i = 0; i < rrpc->nr_luns; i++) {
		rlun = &rrpc->luns[i];

		rblk = rrpc_get_blk(rrpc, rlun, 0);
		if (!rblk)
			goto err;
		rrpc_set_lun_cur(rlun, rblk);

		/* Emergency gc block */
		rblk = rrpc_get_blk(rrpc, rlun, 1);
		if (!rblk)
			goto err;
		rlun->gc_cur = rblk;
	}

	return 0;
err:
	rrpc_put_blks(rrpc);
	return -EINVAL;
}

static struct nvm_tgt_type tt_rrpc;

static void *rrpc_init(struct nvm_dev *dev, struct gendisk *tdisk,
						int lun_begin, int lun_end)
{
	struct request_queue *bqueue = dev->q;
	struct request_queue *tqueue = tdisk->queue;
	struct rrpc *rrpc;
	sector_t soffset;
	int ret;

	if (!(dev->identity.dom & NVM_RSP_L2P)) {
		pr_err("nvm: rrpc: device does not support l2p (%x)\n",
							dev->identity.dom);
		return ERR_PTR(-EINVAL);
	}

	rrpc = kzalloc(sizeof(struct rrpc), GFP_KERNEL);
	if (!rrpc)
		return ERR_PTR(-ENOMEM);

	rrpc->instance.tt = &tt_rrpc;
	rrpc->dev = dev;
	rrpc->disk = tdisk;

	bio_list_init(&rrpc->requeue_bios);
	spin_lock_init(&rrpc->bio_lock);
	INIT_WORK(&rrpc->ws_requeue, rrpc_requeue);

	rrpc->nr_luns = lun_end - lun_begin + 1;
	rrpc->total_blocks = (unsigned long)dev->blks_per_lun * rrpc->nr_luns;
	rrpc->nr_sects = (unsigned long long)dev->sec_per_lun * rrpc->nr_luns;

	/* simple round-robin strategy */
	atomic_set(&rrpc->next_lun, -1);

	ret = rrpc_area_init(rrpc, &soffset);
	if (ret < 0) {
		pr_err("nvm: rrpc: could not initialize area\n");
		return ERR_PTR(ret);
	}
	rrpc->soffset = soffset;

	ret = rrpc_luns_init(rrpc, lun_begin, lun_end);
	if (ret) {
		pr_err("nvm: rrpc: could not initialize luns\n");
		goto err;
	}

	rrpc->poffset = dev->sec_per_lun * lun_begin;
	rrpc->lun_offset = lun_begin;

	ret = rrpc_core_init(rrpc);
	if (ret) {
		pr_err("nvm: rrpc: could not initialize core\n");
		goto err;
	}

	ret = rrpc_map_init(rrpc);
	if (ret) {
		pr_err("nvm: rrpc: could not initialize maps\n");
		goto err;
	}

	ret = rrpc_blocks_init(rrpc);
	if (ret) {
		pr_err("nvm: rrpc: could not initialize state for blocks\n");
		goto err;
	}

	ret = rrpc_luns_configure(rrpc);
	if (ret) {
		pr_err("nvm: rrpc: not enough blocks available in LUNs.\n");
		goto err;
	}

	ret = rrpc_gc_init(rrpc);
	if (ret) {
		pr_err("nvm: rrpc: could not initialize gc\n");
		goto err;
	}

	/* inherit the size from the underlying device */
	blk_queue_logical_block_size(tqueue, queue_physical_block_size(bqueue));
	blk_queue_max_hw_sectors(tqueue, queue_max_hw_sectors(bqueue));

	pr_info("nvm: rrpc initialized with %u luns and %llu pages.\n",
			rrpc->nr_luns, (unsigned long long)rrpc->nr_sects);

	mod_timer(&rrpc->gc_timer, jiffies + msecs_to_jiffies(10));

	return rrpc;
err:
	rrpc_free(rrpc);
	return ERR_PTR(ret);
}

/* round robin, page-based FTL, and cost-based GC */
static struct nvm_tgt_type tt_rrpc = {
	.name		= "rrpc",
	.version	= {1, 0, 0},

	.make_rq	= rrpc_make_rq,
	.capacity	= rrpc_capacity,
	.end_io		= rrpc_end_io,

	.init		= rrpc_init,
	.exit		= rrpc_exit,
};

static int __init rrpc_module_init(void)
{
	return nvm_register_tgt_type(&tt_rrpc);
}

static void rrpc_module_exit(void)
{
	nvm_unregister_tgt_type(&tt_rrpc);
}

module_init(rrpc_module_init);
module_exit(rrpc_module_exit);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Block-Device Target for Open-Channel SSDs");