/*
 * Copyright (C) 2015 IT University of Copenhagen
 * Initial release: Matias Bjorling <m@bjorling.me>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * Implementation of a Round-robin page-based Hybrid FTL for Open-channel SSDs.
 */

#include "rrpc.h"

static struct kmem_cache *rrpc_gcb_cache, *rrpc_rq_cache;
static DECLARE_RWSEM(rrpc_lock);
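/* rrpc_gcb_cache and rrpc_rq_cache are shared by all rrpc instances;
 * rrpc_lock serialises setting them up (see rrpc_core_init()).
 */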
static int rrpc_submit_io(struct rrpc *rrpc, struct bio *bio,
				struct nvm_rq *rqd, unsigned long flags);

#define rrpc_for_each_lun(rrpc, rlun, i) \
		for ((i) = 0, rlun = &(rrpc)->luns[0]; \
			(i) < (rrpc)->nr_luns; (i)++, rlun = &(rrpc)->luns[(i)])

static void rrpc_page_invalidate(struct rrpc *rrpc, struct rrpc_addr *a)

	struct rrpc_block *rblk = a->rblk;
	unsigned int pg_offset;

	lockdep_assert_held(&rrpc->rev_lock);

	if (a->addr == ADDR_EMPTY || !rblk)

	spin_lock(&rblk->lock);

	div_u64_rem(a->addr, rrpc->dev->sec_per_blk, &pg_offset);
	WARN_ON(test_and_set_bit(pg_offset, rblk->invalid_pages));
	rblk->nr_invalid_pages++;

	spin_unlock(&rblk->lock);

	rrpc->rev_trans_map[a->addr - rrpc->poffset].addr = ADDR_EMPTY;
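	/* With the reverse entry cleared above, GC treats this physical page
	 * as stale and will not try to migrate it again.
	 */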
static void rrpc_invalidate_range(struct rrpc *rrpc, sector_t slba,

	spin_lock(&rrpc->rev_lock);
	for (i = slba; i < slba + len; i++) {
		struct rrpc_addr *gp = &rrpc->trans_map[i];

		rrpc_page_invalidate(rrpc, gp);

	spin_unlock(&rrpc->rev_lock);

static struct nvm_rq *rrpc_inflight_laddr_acquire(struct rrpc *rrpc,
					sector_t laddr, unsigned int pages)

	struct rrpc_inflight_rq *inf;

	rqd = mempool_alloc(rrpc->rq_pool, GFP_ATOMIC);

	return ERR_PTR(-ENOMEM);

	inf = rrpc_get_inflight_rq(rqd);
	if (rrpc_lock_laddr(rrpc, laddr, pages, inf)) {
		mempool_free(rqd, rrpc->rq_pool);

static void rrpc_inflight_laddr_release(struct rrpc *rrpc, struct nvm_rq *rqd)

	struct rrpc_inflight_rq *inf = rrpc_get_inflight_rq(rqd);

	rrpc_unlock_laddr(rrpc, inf);

	mempool_free(rqd, rrpc->rq_pool);
static void rrpc_discard(struct rrpc *rrpc, struct bio *bio)

	sector_t slba = bio->bi_iter.bi_sector / NR_PHY_IN_LOG;
	sector_t len = bio->bi_iter.bi_size / RRPC_EXPOSED_PAGE_SIZE;

	rqd = rrpc_inflight_laddr_acquire(rrpc, slba, len);

	pr_err("rrpc: unable to acquire inflight IO\n");

	rrpc_invalidate_range(rrpc, slba, len);
	rrpc_inflight_laddr_release(rrpc, rqd);

static int block_is_full(struct rrpc *rrpc, struct rrpc_block *rblk)

	return (rblk->next_page == rrpc->dev->sec_per_blk);

/* Calculate relative addr for the given block, considering instantiated LUNs */
static u64 block_to_rel_addr(struct rrpc *rrpc, struct rrpc_block *rblk)

	struct nvm_block *blk = rblk->parent;
	int lun_blk = blk->id % (rrpc->dev->blks_per_lun * rrpc->nr_luns);

	return lun_blk * rrpc->dev->sec_per_blk;

/* Calculate global addr for the given block */
static u64 block_to_addr(struct rrpc *rrpc, struct rrpc_block *rblk)

	struct nvm_block *blk = rblk->parent;

	return blk->id * rrpc->dev->sec_per_blk;
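	/* Illustrative example (numbers not from the source): with 256
	 * sectors per block, block id 10 starts at global sector 2560.
	 */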
static struct ppa_addr linear_to_generic_addr(struct nvm_dev *dev,

	int secs, pgs, blks, luns;
	sector_t ppa = r.ppa;

	div_u64_rem(ppa, dev->sec_per_pg, &secs);

	sector_div(ppa, dev->sec_per_pg);
	div_u64_rem(ppa, dev->pgs_per_blk, &pgs);

	sector_div(ppa, dev->pgs_per_blk);
	div_u64_rem(ppa, dev->blks_per_lun, &blks);

	sector_div(ppa, dev->blks_per_lun);
	div_u64_rem(ppa, dev->luns_per_chnl, &luns);

	sector_div(ppa, dev->luns_per_chnl);
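	/* Each div/rem pair peels off one field of the generic address:
	 * sector within page, page within block, block within LUN, LUN
	 * within channel; the quotient that remains selects the channel.
	 */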
static struct ppa_addr rrpc_ppa_to_gaddr(struct nvm_dev *dev, u64 addr)

	struct ppa_addr paddr;

	return linear_to_generic_addr(dev, paddr);

/* requires lun->lock taken */
static void rrpc_set_lun_cur(struct rrpc_lun *rlun, struct rrpc_block *rblk)

	struct rrpc *rrpc = rlun->rrpc;

	spin_lock(&rlun->cur->lock);
	WARN_ON(!block_is_full(rrpc, rlun->cur));
	spin_unlock(&rlun->cur->lock);

static struct rrpc_block *rrpc_get_blk(struct rrpc *rrpc, struct rrpc_lun *rlun,

	struct nvm_lun *lun = rlun->parent;
	struct nvm_block *blk;
	struct rrpc_block *rblk;

	spin_lock(&lun->lock);
	blk = nvm_get_blk_unlocked(rrpc->dev, rlun->parent, flags);

	pr_err("nvm: rrpc: cannot get new block from media manager\n");
	spin_unlock(&lun->lock);

	rblk = rrpc_get_rblk(rlun, blk->id);
	list_add_tail(&rblk->list, &rlun->open_list);
	spin_unlock(&lun->lock);

	bitmap_zero(rblk->invalid_pages, rrpc->dev->sec_per_blk);

	rblk->nr_invalid_pages = 0;
	atomic_set(&rblk->data_cmnt_size, 0);
static void rrpc_put_blk(struct rrpc *rrpc, struct rrpc_block *rblk)

	struct rrpc_lun *rlun = rblk->rlun;
	struct nvm_lun *lun = rlun->parent;

	spin_lock(&lun->lock);
	nvm_put_blk_unlocked(rrpc->dev, rblk->parent);
	list_del(&rblk->list);
	spin_unlock(&lun->lock);

static void rrpc_put_blks(struct rrpc *rrpc)

	struct rrpc_lun *rlun;

	for (i = 0; i < rrpc->nr_luns; i++) {
		rlun = &rrpc->luns[i];

		rrpc_put_blk(rrpc, rlun->cur);

		rrpc_put_blk(rrpc, rlun->gc_cur);

static struct rrpc_lun *get_next_lun(struct rrpc *rrpc)

	int next = atomic_inc_return(&rrpc->next_lun);

	return &rrpc->luns[next % rrpc->nr_luns];
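	/* Simple round robin: the atomic counter hands out LUNs to writers in
	 * a rotating order.
	 */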
static void rrpc_gc_kick(struct rrpc *rrpc)

	struct rrpc_lun *rlun;

	for (i = 0; i < rrpc->nr_luns; i++) {
		rlun = &rrpc->luns[i];
		queue_work(rrpc->krqd_wq, &rlun->ws_gc);

/*
 * timed GC every interval.
 */
static void rrpc_gc_timer(unsigned long data)

	struct rrpc *rrpc = (struct rrpc *)data;

	mod_timer(&rrpc->gc_timer, jiffies + msecs_to_jiffies(10));
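	/* The timer re-arms itself roughly every 10 ms. */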
static void rrpc_end_sync_bio(struct bio *bio)

	struct completion *waiting = bio->bi_private;

	pr_err("nvm: gc request failed (%u).\n", bio->bi_error);

/*
 * rrpc_move_valid_pages -- migrate live data off the block
 * @rrpc: the 'rrpc' structure
 * @block: the block from which to migrate live pages
 *
 * GC algorithms may call this function to migrate remaining live
 * pages off the block prior to erasing it. This function blocks
 * further execution until the operation is complete.
 */
static int rrpc_move_valid_pages(struct rrpc *rrpc, struct rrpc_block *rblk)

	struct request_queue *q = rrpc->dev->q;
	struct rrpc_rev_addr *rev;

	int nr_sec_per_blk = rrpc->dev->sec_per_blk;

	DECLARE_COMPLETION_ONSTACK(wait);

	if (bitmap_full(rblk->invalid_pages, nr_sec_per_blk))

	bio = bio_alloc(GFP_NOIO, 1);

	pr_err("nvm: could not alloc bio to gc\n");

	page = mempool_alloc(rrpc->page_pool, GFP_NOIO);

	while ((slot = find_first_zero_bit(rblk->invalid_pages,
					nr_sec_per_blk)) < nr_sec_per_blk) {
		phys_addr = rblk->parent->id * nr_sec_per_blk + slot;

		spin_lock(&rrpc->rev_lock);
		/* Get logical address from physical to logical table */
		rev = &rrpc->rev_trans_map[phys_addr - rrpc->poffset];
		/* already updated by previous regular write */
		if (rev->addr == ADDR_EMPTY) {
			spin_unlock(&rrpc->rev_lock);

		rqd = rrpc_inflight_laddr_acquire(rrpc, rev->addr, 1);
		if (IS_ERR_OR_NULL(rqd)) {
			spin_unlock(&rrpc->rev_lock);

		spin_unlock(&rrpc->rev_lock);

		/* Perform read to do GC */
		bio->bi_iter.bi_sector = rrpc_get_sector(rev->addr);

		bio->bi_private = &wait;
		bio->bi_end_io = rrpc_end_sync_bio;

		/* TODO: may fail when EXP_PG_SIZE > PAGE_SIZE */
		bio_add_pc_page(q, bio, page, RRPC_EXPOSED_PAGE_SIZE, 0);

		if (rrpc_submit_io(rrpc, bio, rqd, NVM_IOTYPE_GC)) {
			pr_err("rrpc: gc read failed.\n");
			rrpc_inflight_laddr_release(rrpc, rqd);

		wait_for_completion_io(&wait);

		rrpc_inflight_laddr_release(rrpc, rqd);

		reinit_completion(&wait);
		bio->bi_iter.bi_sector = rrpc_get_sector(rev->addr);

		bio->bi_private = &wait;
		bio->bi_end_io = rrpc_end_sync_bio;

		bio_add_pc_page(q, bio, page, RRPC_EXPOSED_PAGE_SIZE, 0);

		/* turn the command around and write the data back to a new
		 * block
		 */
		if (rrpc_submit_io(rrpc, bio, rqd, NVM_IOTYPE_GC)) {
			pr_err("rrpc: gc write failed.\n");
			rrpc_inflight_laddr_release(rrpc, rqd);

		wait_for_completion_io(&wait);

		rrpc_inflight_laddr_release(rrpc, rqd);

	mempool_free(page, rrpc->page_pool);

	if (!bitmap_full(rblk->invalid_pages, nr_sec_per_blk)) {
		pr_err("nvm: failed to garbage collect block\n");
static void rrpc_block_gc(struct work_struct *work)

	struct rrpc_block_gc *gcb = container_of(work, struct rrpc_block_gc,

	struct rrpc *rrpc = gcb->rrpc;
	struct rrpc_block *rblk = gcb->rblk;
	struct nvm_dev *dev = rrpc->dev;
	struct nvm_lun *lun = rblk->parent->lun;
	struct rrpc_lun *rlun = &rrpc->luns[lun->id - rrpc->lun_offset];

	mempool_free(gcb, rrpc->gcb_pool);
	pr_debug("nvm: block '%lu' being reclaimed\n", rblk->parent->id);

	if (rrpc_move_valid_pages(rrpc, rblk))

	if (nvm_erase_blk(dev, rblk->parent))

	rrpc_put_blk(rrpc, rblk);

	spin_lock(&rlun->lock);
	list_add_tail(&rblk->prio, &rlun->prio_list);
	spin_unlock(&rlun->lock);
/* the block with the highest number of invalid pages will be at the beginning */
static struct rrpc_block *rblock_max_invalid(struct rrpc_block *ra,
						struct rrpc_block *rb)

	if (ra->nr_invalid_pages == rb->nr_invalid_pages)

	return (ra->nr_invalid_pages < rb->nr_invalid_pages) ? rb : ra;

/* linearly find the block with the highest number of invalid pages */
static struct rrpc_block *block_prio_find_max(struct rrpc_lun *rlun)

	struct list_head *prio_list = &rlun->prio_list;
	struct rrpc_block *rblock, *max;

	BUG_ON(list_empty(prio_list));

	max = list_first_entry(prio_list, struct rrpc_block, prio);
	list_for_each_entry(rblock, prio_list, prio)
		max = rblock_max_invalid(max, rblock);
static void rrpc_lun_gc(struct work_struct *work)

	struct rrpc_lun *rlun = container_of(work, struct rrpc_lun, ws_gc);
	struct rrpc *rrpc = rlun->rrpc;
	struct nvm_lun *lun = rlun->parent;
	struct rrpc_block_gc *gcb;
	unsigned int nr_blocks_need;

	nr_blocks_need = rrpc->dev->blks_per_lun / GC_LIMIT_INVERSE;

	if (nr_blocks_need < rrpc->nr_luns)
		nr_blocks_need = rrpc->nr_luns;
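	/* GC aims to keep 1/GC_LIMIT_INVERSE of the LUN's blocks free, but
	 * never targets fewer free blocks than there are LUNs.
	 */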
	spin_lock(&rlun->lock);
	while (nr_blocks_need > lun->nr_free_blocks &&
					!list_empty(&rlun->prio_list)) {
		struct rrpc_block *rblock = block_prio_find_max(rlun);
		struct nvm_block *block = rblock->parent;

		if (!rblock->nr_invalid_pages)

		gcb = mempool_alloc(rrpc->gcb_pool, GFP_ATOMIC);

		list_del_init(&rblock->prio);

		BUG_ON(!block_is_full(rrpc, rblock));

		pr_debug("rrpc: selected block '%lu' for GC\n", block->id);

		INIT_WORK(&gcb->ws_gc, rrpc_block_gc);

		queue_work(rrpc->kgc_wq, &gcb->ws_gc);

	spin_unlock(&rlun->lock);

	/* TODO: Hint that request queue can be started again */
static void rrpc_gc_queue(struct work_struct *work)

	struct rrpc_block_gc *gcb = container_of(work, struct rrpc_block_gc,

	struct rrpc *rrpc = gcb->rrpc;
	struct rrpc_block *rblk = gcb->rblk;
	struct nvm_lun *lun = rblk->parent->lun;
	struct nvm_block *blk = rblk->parent;
	struct rrpc_lun *rlun = &rrpc->luns[lun->id - rrpc->lun_offset];

	spin_lock(&rlun->lock);
	list_add_tail(&rblk->prio, &rlun->prio_list);
	spin_unlock(&rlun->lock);

	spin_lock(&lun->lock);
	lun->nr_open_blocks--;
	lun->nr_closed_blocks++;
	blk->state &= ~NVM_BLK_ST_OPEN;
	blk->state |= NVM_BLK_ST_CLOSED;
	list_move_tail(&rblk->list, &rlun->closed_list);
	spin_unlock(&lun->lock);
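	/* The fully written block has moved from the LUN's open list to its
	 * closed list, and sits on the prio list as a GC candidate.
	 */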
	mempool_free(gcb, rrpc->gcb_pool);
	pr_debug("nvm: block '%lu' is full, allow GC (sched)\n",

static const struct block_device_operations rrpc_fops = {
	.owner		= THIS_MODULE,

static struct rrpc_lun *rrpc_get_lun_rr(struct rrpc *rrpc, int is_gc)

	struct rrpc_lun *rlun, *max_free;

	return get_next_lun(rrpc);

	/* during GC, we don't care about RR, instead we want to make
	 * sure that we maintain evenness between the block luns.
	 */
	max_free = &rrpc->luns[0];
	/* prevent GC-ing lun from devouring pages of a lun with
	 * few free blocks. We don't take the lock as we only need an
	 * estimate.
	 */
	rrpc_for_each_lun(rrpc, rlun, i) {
		if (rlun->parent->nr_free_blocks >
					max_free->parent->nr_free_blocks)
static struct rrpc_addr *rrpc_update_map(struct rrpc *rrpc, sector_t laddr,
					struct rrpc_block *rblk, u64 paddr)

	struct rrpc_addr *gp;
	struct rrpc_rev_addr *rev;

	BUG_ON(laddr >= rrpc->nr_sects);

	gp = &rrpc->trans_map[laddr];
	spin_lock(&rrpc->rev_lock);

	rrpc_page_invalidate(rrpc, gp);

	rev = &rrpc->rev_trans_map[gp->addr - rrpc->poffset];

	spin_unlock(&rrpc->rev_lock);

static u64 rrpc_alloc_addr(struct rrpc *rrpc, struct rrpc_block *rblk)

	u64 addr = ADDR_EMPTY;

	spin_lock(&rblk->lock);
	if (block_is_full(rrpc, rblk))

	addr = block_to_addr(rrpc, rblk) + rblk->next_page;

	spin_unlock(&rblk->lock);
/* Simple round-robin logical-to-physical address translation.
 *
 * Retrieve the mapping using the active append point. Then update the ap for
 * the next write to the disk.
 *
 * Returns rrpc_addr with the physical address and block. Remember to return to
 * rrpc->addr_cache when the request is finished.
 */
static struct rrpc_addr *rrpc_map_page(struct rrpc *rrpc, sector_t laddr,

	struct rrpc_lun *rlun;
	struct rrpc_block *rblk;

	rlun = rrpc_get_lun_rr(rrpc, is_gc);
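	/* Non-GC writes back off when the chosen LUN runs low on free blocks,
	 * keeping headroom for GC (compare the reservation in rrpc_capacity()).
	 */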
	if (!is_gc && lun->nr_free_blocks < rrpc->nr_luns * 4)

	spin_lock(&rlun->lock);

	paddr = rrpc_alloc_addr(rrpc, rblk);

	if (paddr == ADDR_EMPTY) {
		rblk = rrpc_get_blk(rrpc, rlun, 0);

		rrpc_set_lun_cur(rlun, rblk);

		/* retry from emergency gc block */
		paddr = rrpc_alloc_addr(rrpc, rlun->gc_cur);
		if (paddr == ADDR_EMPTY) {
			rblk = rrpc_get_blk(rrpc, rlun, 1);

			pr_err("rrpc: no more blocks");

			paddr = rrpc_alloc_addr(rrpc, rlun->gc_cur);

	spin_unlock(&rlun->lock);
	return rrpc_update_map(rrpc, laddr, rblk, paddr);

	spin_unlock(&rlun->lock);
static void rrpc_run_gc(struct rrpc *rrpc, struct rrpc_block *rblk)

	struct rrpc_block_gc *gcb;

	gcb = mempool_alloc(rrpc->gcb_pool, GFP_ATOMIC);

	pr_err("rrpc: unable to queue block for gc.");

	INIT_WORK(&gcb->ws_gc, rrpc_gc_queue);
	queue_work(rrpc->kgc_wq, &gcb->ws_gc);

static void rrpc_end_io_write(struct rrpc *rrpc, struct rrpc_rq *rrqd,
						sector_t laddr, uint8_t npages)

	struct rrpc_block *rblk;

	for (i = 0; i < npages; i++) {
		p = &rrpc->trans_map[laddr + i];

		lun = rblk->parent->lun;

		cmnt_size = atomic_inc_return(&rblk->data_cmnt_size);
		if (unlikely(cmnt_size == rrpc->dev->sec_per_blk))
			rrpc_run_gc(rrpc, rblk);
static void rrpc_end_io(struct nvm_rq *rqd)

	struct rrpc *rrpc = container_of(rqd->ins, struct rrpc, instance);
	struct rrpc_rq *rrqd = nvm_rq_to_pdu(rqd);
	uint8_t npages = rqd->nr_pages;
	sector_t laddr = rrpc_get_laddr(rqd->bio) - npages;

	if (bio_data_dir(rqd->bio) == WRITE)
		rrpc_end_io_write(rrpc, rrqd, laddr, npages);

	if (rrqd->flags & NVM_IOTYPE_GC)

	rrpc_unlock_rq(rrpc, rqd);

	nvm_dev_dma_free(rrpc->dev, rqd->ppa_list, rqd->dma_ppa_list);

	nvm_dev_dma_free(rrpc->dev, rqd->metadata, rqd->dma_metadata);

	mempool_free(rqd, rrpc->rq_pool);
static int rrpc_read_ppalist_rq(struct rrpc *rrpc, struct bio *bio,
			struct nvm_rq *rqd, unsigned long flags, int npages)

	struct rrpc_inflight_rq *r = rrpc_get_inflight_rq(rqd);
	struct rrpc_addr *gp;
	sector_t laddr = rrpc_get_laddr(bio);
	int is_gc = flags & NVM_IOTYPE_GC;

	if (!is_gc && rrpc_lock_rq(rrpc, bio, rqd)) {
		nvm_dev_dma_free(rrpc->dev, rqd->ppa_list, rqd->dma_ppa_list);
		return NVM_IO_REQUEUE;

	for (i = 0; i < npages; i++) {
		/* We assume that mapping occurs at 4KB granularity */
		BUG_ON(!(laddr + i >= 0 && laddr + i < rrpc->nr_sects));
		gp = &rrpc->trans_map[laddr + i];

		rqd->ppa_list[i] = rrpc_ppa_to_gaddr(rrpc->dev,

		rrpc_unlock_laddr(rrpc, r);
		nvm_dev_dma_free(rrpc->dev, rqd->ppa_list,

	rqd->opcode = NVM_OP_HBREAD;
static int rrpc_read_rq(struct rrpc *rrpc, struct bio *bio, struct nvm_rq *rqd,

	struct rrpc_rq *rrqd = nvm_rq_to_pdu(rqd);
	int is_gc = flags & NVM_IOTYPE_GC;
	sector_t laddr = rrpc_get_laddr(bio);
	struct rrpc_addr *gp;

	if (!is_gc && rrpc_lock_rq(rrpc, bio, rqd))
		return NVM_IO_REQUEUE;

	BUG_ON(!(laddr >= 0 && laddr < rrpc->nr_sects));
	gp = &rrpc->trans_map[laddr];

	rqd->ppa_addr = rrpc_ppa_to_gaddr(rrpc->dev, gp->addr);

	rrpc_unlock_rq(rrpc, rqd);

	rqd->opcode = NVM_OP_HBREAD;
static int rrpc_write_ppalist_rq(struct rrpc *rrpc, struct bio *bio,
			struct nvm_rq *rqd, unsigned long flags, int npages)

	struct rrpc_inflight_rq *r = rrpc_get_inflight_rq(rqd);

	sector_t laddr = rrpc_get_laddr(bio);
	int is_gc = flags & NVM_IOTYPE_GC;

	if (!is_gc && rrpc_lock_rq(rrpc, bio, rqd)) {
		nvm_dev_dma_free(rrpc->dev, rqd->ppa_list, rqd->dma_ppa_list);
		return NVM_IO_REQUEUE;

	for (i = 0; i < npages; i++) {
		/* We assume that mapping occurs at 4KB granularity */
		p = rrpc_map_page(rrpc, laddr + i, is_gc);

		rrpc_unlock_laddr(rrpc, r);
		nvm_dev_dma_free(rrpc->dev, rqd->ppa_list,

		return NVM_IO_REQUEUE;

		rqd->ppa_list[i] = rrpc_ppa_to_gaddr(rrpc->dev,

	rqd->opcode = NVM_OP_HBWRITE;
static int rrpc_write_rq(struct rrpc *rrpc, struct bio *bio,
				struct nvm_rq *rqd, unsigned long flags)

	struct rrpc_rq *rrqd = nvm_rq_to_pdu(rqd);

	int is_gc = flags & NVM_IOTYPE_GC;
	sector_t laddr = rrpc_get_laddr(bio);

	if (!is_gc && rrpc_lock_rq(rrpc, bio, rqd))
		return NVM_IO_REQUEUE;

	p = rrpc_map_page(rrpc, laddr, is_gc);

	rrpc_unlock_rq(rrpc, rqd);

	return NVM_IO_REQUEUE;

	rqd->ppa_addr = rrpc_ppa_to_gaddr(rrpc->dev, p->addr);
	rqd->opcode = NVM_OP_HBWRITE;
static int rrpc_setup_rq(struct rrpc *rrpc, struct bio *bio,
			struct nvm_rq *rqd, unsigned long flags, uint8_t npages)

	rqd->ppa_list = nvm_dev_dma_alloc(rrpc->dev, GFP_KERNEL,

	if (!rqd->ppa_list) {
		pr_err("rrpc: not able to allocate ppa list\n");

	if (bio_rw(bio) == WRITE)
		return rrpc_write_ppalist_rq(rrpc, bio, rqd, flags,

	return rrpc_read_ppalist_rq(rrpc, bio, rqd, flags, npages);

	if (bio_rw(bio) == WRITE)
		return rrpc_write_rq(rrpc, bio, rqd, flags);

	return rrpc_read_rq(rrpc, bio, rqd, flags);

static int rrpc_submit_io(struct rrpc *rrpc, struct bio *bio,
				struct nvm_rq *rqd, unsigned long flags)

	struct rrpc_rq *rrq = nvm_rq_to_pdu(rqd);
	uint8_t nr_pages = rrpc_get_pages(bio);
	int bio_size = bio_sectors(bio) << 9;
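	/* Requests smaller than one device sector or larger than the
	 * device's maximum request size are not accepted.
	 */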
	if (bio_size < rrpc->dev->sec_size)

	else if (bio_size > rrpc->dev->max_rq_size)

	err = rrpc_setup_rq(rrpc, bio, rqd, flags, nr_pages);

	rqd->ins = &rrpc->instance;
	rqd->nr_pages = nr_pages;

	err = nvm_submit_io(rrpc->dev, rqd);

	pr_err("rrpc: I/O submission failed: %d\n", err);

	if (!(flags & NVM_IOTYPE_GC)) {
		rrpc_unlock_rq(rrpc, rqd);
		if (rqd->nr_pages > 1)
			nvm_dev_dma_free(rrpc->dev,
					rqd->ppa_list, rqd->dma_ppa_list);
static blk_qc_t rrpc_make_rq(struct request_queue *q, struct bio *bio)

	struct rrpc *rrpc = q->queuedata;

	if (bio->bi_rw & REQ_DISCARD) {
		rrpc_discard(rrpc, bio);
		return BLK_QC_T_NONE;

	rqd = mempool_alloc(rrpc->rq_pool, GFP_KERNEL);

	pr_err_ratelimited("rrpc: not able to queue bio.");

	return BLK_QC_T_NONE;

	memset(rqd, 0, sizeof(struct nvm_rq));

	err = rrpc_submit_io(rrpc, bio, rqd, NVM_IOTYPE_NONE);

	return BLK_QC_T_NONE;

	spin_lock(&rrpc->bio_lock);
	bio_list_add(&rrpc->requeue_bios, bio);
	spin_unlock(&rrpc->bio_lock);
	queue_work(rrpc->kgc_wq, &rrpc->ws_requeue);

	mempool_free(rqd, rrpc->rq_pool);
	return BLK_QC_T_NONE;
static void rrpc_requeue(struct work_struct *work)

	struct rrpc *rrpc = container_of(work, struct rrpc, ws_requeue);
	struct bio_list bios;

	bio_list_init(&bios);

	spin_lock(&rrpc->bio_lock);
	bio_list_merge(&bios, &rrpc->requeue_bios);
	bio_list_init(&rrpc->requeue_bios);
	spin_unlock(&rrpc->bio_lock);

	while ((bio = bio_list_pop(&bios)))
		rrpc_make_rq(rrpc->disk->queue, bio);
static void rrpc_gc_free(struct rrpc *rrpc)

	struct rrpc_lun *rlun;

	destroy_workqueue(rrpc->krqd_wq);

	destroy_workqueue(rrpc->kgc_wq);

	for (i = 0; i < rrpc->nr_luns; i++) {
		rlun = &rrpc->luns[i];

static int rrpc_gc_init(struct rrpc *rrpc)

	rrpc->krqd_wq = alloc_workqueue("rrpc-lun", WQ_MEM_RECLAIM|WQ_UNBOUND,

	rrpc->kgc_wq = alloc_workqueue("rrpc-bg", WQ_MEM_RECLAIM, 1);

	setup_timer(&rrpc->gc_timer, rrpc_gc_timer, (unsigned long)rrpc);
static void rrpc_map_free(struct rrpc *rrpc)

	vfree(rrpc->rev_trans_map);
	vfree(rrpc->trans_map);

static int rrpc_l2p_update(u64 slba, u32 nlb, __le64 *entries, void *private)

	struct rrpc *rrpc = (struct rrpc *)private;
	struct nvm_dev *dev = rrpc->dev;
	struct rrpc_addr *addr = rrpc->trans_map + slba;
	struct rrpc_rev_addr *raddr = rrpc->rev_trans_map;
	u64 elba = slba + nlb;

	if (unlikely(elba > dev->total_secs)) {
		pr_err("nvm: L2P data from device is out of bounds!\n");
	for (i = 0; i < nlb; i++) {
		u64 pba = le64_to_cpu(entries[i]);

		/* LNVM treats address-spaces as silos, LBA and PBA are
		 * equally large and zero-indexed.
		 */
		if (unlikely(pba >= dev->total_secs && pba != U64_MAX)) {
			pr_err("nvm: L2P data entry is out of bounds!\n");

		/* Address zero is a special one. The first page on a disk is
		 * protected. As it often holds internal device boot
		 * blocks.
		 */

		div_u64_rem(pba, rrpc->nr_sects, &mod);

		raddr[mod].addr = slba + i;
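		/* The reverse entry above mirrors the forward map, so GC can
		 * translate a physical page back to its logical address.
		 */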
static int rrpc_map_init(struct rrpc *rrpc)

	struct nvm_dev *dev = rrpc->dev;

	rrpc->trans_map = vzalloc(sizeof(struct rrpc_addr) * rrpc->nr_sects);
	if (!rrpc->trans_map)

	rrpc->rev_trans_map = vmalloc(sizeof(struct rrpc_rev_addr)

	if (!rrpc->rev_trans_map)

	for (i = 0; i < rrpc->nr_sects; i++) {
		struct rrpc_addr *p = &rrpc->trans_map[i];
		struct rrpc_rev_addr *r = &rrpc->rev_trans_map[i];

		p->addr = ADDR_EMPTY;
		r->addr = ADDR_EMPTY;

	if (!dev->ops->get_l2p_tbl)

	/* Bring up the mapping table from device */
	ret = dev->ops->get_l2p_tbl(dev, 0, dev->total_secs, rrpc_l2p_update,

	pr_err("nvm: rrpc: could not read L2P table.\n");

/* Minimum pages needed within a lun */
#define PAGE_POOL_SIZE 16
#define ADDR_POOL_SIZE 64
static int rrpc_core_init(struct rrpc *rrpc)

	down_write(&rrpc_lock);
	if (!rrpc_gcb_cache) {
		rrpc_gcb_cache = kmem_cache_create("rrpc_gcb",
				sizeof(struct rrpc_block_gc), 0, 0, NULL);
		if (!rrpc_gcb_cache) {
			up_write(&rrpc_lock);

		rrpc_rq_cache = kmem_cache_create("rrpc_rq",
				sizeof(struct nvm_rq) + sizeof(struct rrpc_rq),
		if (!rrpc_rq_cache) {
			kmem_cache_destroy(rrpc_gcb_cache);
			up_write(&rrpc_lock);

	up_write(&rrpc_lock);

	rrpc->page_pool = mempool_create_page_pool(PAGE_POOL_SIZE, 0);
	if (!rrpc->page_pool)

	rrpc->gcb_pool = mempool_create_slab_pool(rrpc->dev->nr_luns,

	if (!rrpc->gcb_pool)

	rrpc->rq_pool = mempool_create_slab_pool(64, rrpc_rq_cache);

	spin_lock_init(&rrpc->inflights.lock);
	INIT_LIST_HEAD(&rrpc->inflights.reqs);

static void rrpc_core_free(struct rrpc *rrpc)

	mempool_destroy(rrpc->page_pool);
	mempool_destroy(rrpc->gcb_pool);
	mempool_destroy(rrpc->rq_pool);
static void rrpc_luns_free(struct rrpc *rrpc)

static int rrpc_luns_init(struct rrpc *rrpc, int lun_begin, int lun_end)

	struct nvm_dev *dev = rrpc->dev;
	struct rrpc_lun *rlun;

	if (dev->sec_per_blk > MAX_INVALID_PAGES_STORAGE * BITS_PER_LONG) {
		pr_err("rrpc: number of pages per block too high.");

	spin_lock_init(&rrpc->rev_lock);

	rrpc->luns = kcalloc(rrpc->nr_luns, sizeof(struct rrpc_lun),

	for (i = 0; i < rrpc->nr_luns; i++) {
		struct nvm_lun *lun = dev->mt->get_lun(dev, lun_begin + i);

		rlun = &rrpc->luns[i];

		INIT_LIST_HEAD(&rlun->prio_list);
		INIT_LIST_HEAD(&rlun->open_list);
		INIT_LIST_HEAD(&rlun->closed_list);

		INIT_WORK(&rlun->ws_gc, rrpc_lun_gc);
		spin_lock_init(&rlun->lock);

		rrpc->total_blocks += dev->blks_per_lun;
		rrpc->nr_sects += dev->sec_per_lun;

		rlun->blocks = vzalloc(sizeof(struct rrpc_block) *
						rrpc->dev->blks_per_lun);

		for (j = 0; j < rrpc->dev->blks_per_lun; j++) {
			struct rrpc_block *rblk = &rlun->blocks[j];
			struct nvm_block *blk = &lun->blocks[j];

			INIT_LIST_HEAD(&rblk->prio);
			spin_lock_init(&rblk->lock);
static void rrpc_free(struct rrpc *rrpc)

	rrpc_map_free(rrpc);
	rrpc_core_free(rrpc);
	rrpc_luns_free(rrpc);

static void rrpc_exit(void *private)

	struct rrpc *rrpc = private;

	del_timer(&rrpc->gc_timer);

	flush_workqueue(rrpc->krqd_wq);
	flush_workqueue(rrpc->kgc_wq);
static sector_t rrpc_capacity(void *private)

	struct rrpc *rrpc = private;
	struct nvm_dev *dev = rrpc->dev;
	sector_t reserved, provisioned;

	/* cur, gc, and two emergency blocks for each lun */
	reserved = rrpc->nr_luns * dev->max_pages_per_blk * 4;
	provisioned = rrpc->nr_sects - reserved;

	if (reserved > rrpc->nr_sects) {
		pr_err("rrpc: not enough space available to expose storage.\n");

	sector_div(provisioned, 10);
	return provisioned * 9 * NR_PHY_IN_LOG;
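	/* Expose 90% of the non-reserved sectors; the remaining 10% is held
	 * back as over-provisioning.
	 */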
/*
 * Looks up the logical address from the reverse trans map and checks if it is
 * still valid by comparing the logical-to-physical mapping with the physical
 * address; pages whose forward mapping no longer points here are marked
 * invalid.
 */
static void rrpc_block_map_update(struct rrpc *rrpc, struct rrpc_block *rblk)

	struct nvm_dev *dev = rrpc->dev;

	struct rrpc_addr *laddr;
	u64 bpaddr, paddr, pladdr;

	bpaddr = block_to_rel_addr(rrpc, rblk);
	for (offset = 0; offset < dev->sec_per_blk; offset++) {
		paddr = bpaddr + offset;

		pladdr = rrpc->rev_trans_map[paddr].addr;
		if (pladdr == ADDR_EMPTY)

		laddr = &rrpc->trans_map[pladdr];

		if (paddr == laddr->addr) {

			set_bit(offset, rblk->invalid_pages);
			rblk->nr_invalid_pages++;
static int rrpc_blocks_init(struct rrpc *rrpc)

	struct rrpc_lun *rlun;
	struct rrpc_block *rblk;
	int lun_iter, blk_iter;

	for (lun_iter = 0; lun_iter < rrpc->nr_luns; lun_iter++) {
		rlun = &rrpc->luns[lun_iter];

		for (blk_iter = 0; blk_iter < rrpc->dev->blks_per_lun;

			rblk = &rlun->blocks[blk_iter];
			rrpc_block_map_update(rrpc, rblk);

static int rrpc_luns_configure(struct rrpc *rrpc)

	struct rrpc_lun *rlun;
	struct rrpc_block *rblk;

	for (i = 0; i < rrpc->nr_luns; i++) {
		rlun = &rrpc->luns[i];

		rblk = rrpc_get_blk(rrpc, rlun, 0);

		rrpc_set_lun_cur(rlun, rblk);

		/* Emergency gc block */
		rblk = rrpc_get_blk(rrpc, rlun, 1);

		rlun->gc_cur = rblk;

	rrpc_put_blks(rrpc);
static struct nvm_tgt_type tt_rrpc;

static void *rrpc_init(struct nvm_dev *dev, struct gendisk *tdisk,
						int lun_begin, int lun_end)

	struct request_queue *bqueue = dev->q;
	struct request_queue *tqueue = tdisk->queue;

	if (!(dev->identity.dom & NVM_RSP_L2P)) {
		pr_err("nvm: rrpc: device does not support l2p (%x)\n",

		return ERR_PTR(-EINVAL);

	rrpc = kzalloc(sizeof(struct rrpc), GFP_KERNEL);

	return ERR_PTR(-ENOMEM);

	rrpc->instance.tt = &tt_rrpc;

	bio_list_init(&rrpc->requeue_bios);
	spin_lock_init(&rrpc->bio_lock);
	INIT_WORK(&rrpc->ws_requeue, rrpc_requeue);

	rrpc->nr_luns = lun_end - lun_begin + 1;

	/* simple round-robin strategy */
	atomic_set(&rrpc->next_lun, -1);
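	/* Start at -1 so the first atomic_inc_return() in get_next_lun()
	 * selects LUN 0.
	 */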
	ret = rrpc_luns_init(rrpc, lun_begin, lun_end);

	pr_err("nvm: rrpc: could not initialize luns\n");

	rrpc->poffset = dev->sec_per_lun * lun_begin;
	rrpc->lun_offset = lun_begin;

	ret = rrpc_core_init(rrpc);

	pr_err("nvm: rrpc: could not initialize core\n");

	ret = rrpc_map_init(rrpc);

	pr_err("nvm: rrpc: could not initialize maps\n");

	ret = rrpc_blocks_init(rrpc);

	pr_err("nvm: rrpc: could not initialize state for blocks\n");

	ret = rrpc_luns_configure(rrpc);

	pr_err("nvm: rrpc: not enough blocks available in LUNs.\n");

	ret = rrpc_gc_init(rrpc);

	pr_err("nvm: rrpc: could not initialize gc\n");

	/* inherit the size from the underlying device */
	blk_queue_logical_block_size(tqueue, queue_physical_block_size(bqueue));
	blk_queue_max_hw_sectors(tqueue, queue_max_hw_sectors(bqueue));

	pr_info("nvm: rrpc initialized with %u luns and %llu pages.\n",
			rrpc->nr_luns, (unsigned long long)rrpc->nr_sects);

	mod_timer(&rrpc->gc_timer, jiffies + msecs_to_jiffies(10));

	return ERR_PTR(ret);
/* round robin, page-based FTL, and cost-based GC */
static struct nvm_tgt_type tt_rrpc = {

	.version	= {1, 0, 0},

	.make_rq	= rrpc_make_rq,
	.capacity	= rrpc_capacity,
	.end_io		= rrpc_end_io,

static int __init rrpc_module_init(void)

	return nvm_register_target(&tt_rrpc);

static void rrpc_module_exit(void)

	nvm_unregister_target(&tt_rrpc);

module_init(rrpc_module_init);
module_exit(rrpc_module_exit);
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Block-Device Target for Open-Channel SSDs");