// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2016 CNEX Labs
 * Initial release: Javier Gonzalez <javier@cnexlabs.com>
 *                  Matias Bjorling <matias@cnexlabs.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * pblk-core.c - pblk's core functionality
 */

#define CREATE_TRACE_POINTS

#include "pblk.h"
#include "pblk-trace.h"

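/* Deferred work: mark the chunk that failed to erase as grown bad in the
 * device's chunk metadata, then release the work item.
 */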
static void pblk_line_mark_bb(struct work_struct *work)
	struct pblk_line_ws *line_ws = container_of(work, struct pblk_line_ws,
	struct pblk *pblk = line_ws->pblk;
	struct nvm_tgt_dev *dev = pblk->dev;
	struct ppa_addr *ppa = line_ws->priv;

	ret = nvm_set_chunk_meta(dev, ppa, 1, NVM_BLK_T_GRWN_BAD);
		struct pblk_line *line;

		line = pblk_ppa_to_line(pblk, *ppa);
		pos = pblk_ppa_to_pos(&dev->geo, *ppa);

		pblk_err(pblk, "failed to mark bb, line:%d, pos:%d\n",

	mempool_free(line_ws, &pblk->gen_ws_pool);

static void pblk_mark_bb(struct pblk *pblk, struct pblk_line *line,
			struct ppa_addr ppa_addr)
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;

	int pos = pblk_ppa_to_pos(geo, ppa_addr);

	pblk_debug(pblk, "erase failed: line:%d, pos:%d\n", line->id, pos);
	atomic_long_inc(&pblk->erase_failed);

	atomic_dec(&line->blk_in_line);
	if (test_and_set_bit(pos, line->blk_bitmap))
		pblk_err(pblk, "attempted to erase bb: line:%d, pos:%d\n",

	/* Not necessary to mark bad blocks on 2.0 spec. */
	if (geo->version == NVM_OCSSD_SPEC_20)

	ppa = kmalloc(sizeof(struct ppa_addr), GFP_ATOMIC);

	pblk_gen_run_ws(pblk, NULL, ppa, pblk_line_mark_bb,
			GFP_ATOMIC, pblk->bb_wq);

static void __pblk_end_io_erase(struct pblk *pblk, struct nvm_rq *rqd)
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	struct nvm_chk_meta *chunk;
	struct pblk_line *line;

	line = pblk_ppa_to_line(pblk, rqd->ppa_addr);
	pos = pblk_ppa_to_pos(geo, rqd->ppa_addr);
	chunk = &line->chks[pos];

	atomic_dec(&line->left_seblks);

		trace_pblk_chunk_reset(pblk_disk_name(pblk),
				&rqd->ppa_addr, PBLK_CHUNK_RESET_FAILED);

		chunk->state = NVM_CHK_ST_OFFLINE;
		pblk_mark_bb(pblk, line, rqd->ppa_addr);

		trace_pblk_chunk_reset(pblk_disk_name(pblk),
				&rqd->ppa_addr, PBLK_CHUNK_RESET_DONE);

		chunk->state = NVM_CHK_ST_FREE;

	trace_pblk_chunk_state(pblk_disk_name(pblk), &rqd->ppa_addr,

	atomic_dec(&pblk->inflight_io);

/* Erase completion assumes that only one block is erased at a time */
static void pblk_end_io_erase(struct nvm_rq *rqd)
	struct pblk *pblk = rqd->private;

	__pblk_end_io_erase(pblk, rqd);
	mempool_free(rqd, &pblk->e_rq_pool);

/*
 * Get information for all chunks from the device.
 *
 * The caller is responsible for freeing the returned structure, which is
 * allocated with vmalloc.
 */
struct nvm_chk_meta *pblk_get_chunk_meta(struct pblk *pblk)
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	struct nvm_chk_meta *meta;

	len = geo->all_chunks * sizeof(*meta);
		return ERR_PTR(-ENOMEM);

	ret = nvm_get_chunk_meta(dev, ppa, geo->all_chunks, meta);
		return ERR_PTR(-EIO);

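/* Index into the flat chunk metadata array for the chunk addressed by ppa
 * (group, parallel unit, chunk).
 */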
struct nvm_chk_meta *pblk_chunk_get_off(struct pblk *pblk,
					struct nvm_chk_meta *meta,
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	int ch_off = ppa.m.grp * geo->num_chk * geo->num_lun;
	int lun_off = ppa.m.pu * geo->num_chk;
	int chk_off = ppa.m.chk;

	return meta + ch_off + lun_off + chk_off;

void __pblk_map_invalidate(struct pblk *pblk, struct pblk_line *line,
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	struct list_head *move_list = NULL;

	/* Lines being reclaimed (GC'ed) cannot be invalidated. Before the L2P
	 * table is modified with reclaimed sectors, a check is done to ensure
	 * that newer updates are not overwritten.
	 */
	spin_lock(&line->lock);
	WARN_ON(line->state == PBLK_LINESTATE_FREE);

	if (test_and_set_bit(paddr, line->invalid_bitmap)) {
		WARN_ONCE(1, "pblk: double invalidate\n");
		spin_unlock(&line->lock);

	le32_add_cpu(line->vsc, -1);

	if (line->state == PBLK_LINESTATE_CLOSED)
		move_list = pblk_line_gc_list(pblk, line);
	spin_unlock(&line->lock);

	spin_lock(&l_mg->gc_lock);
	spin_lock(&line->lock);
	/* Prevent moving a line that has just been chosen for GC */
	if (line->state == PBLK_LINESTATE_GC) {
		spin_unlock(&line->lock);
		spin_unlock(&l_mg->gc_lock);
	spin_unlock(&line->lock);

	list_move_tail(&line->list, move_list);
	spin_unlock(&l_mg->gc_lock);

void pblk_map_invalidate(struct pblk *pblk, struct ppa_addr ppa)
	struct pblk_line *line;

#ifdef CONFIG_NVM_PBLK_DEBUG
	/* Callers must ensure that the ppa points to a device address */
	BUG_ON(pblk_addr_in_cache(ppa));
	BUG_ON(pblk_ppa_empty(ppa));

	line = pblk_ppa_to_line(pblk, ppa);
	paddr = pblk_dev_ppa_to_line_addr(pblk, ppa);

	__pblk_map_invalidate(pblk, line, paddr);

static void pblk_invalidate_range(struct pblk *pblk, sector_t slba,
				unsigned int nr_secs)
	spin_lock(&pblk->trans_lock);
	for (lba = slba; lba < slba + nr_secs; lba++) {
		ppa = pblk_trans_map_get(pblk, lba);

		if (!pblk_addr_in_cache(ppa) && !pblk_ppa_empty(ppa))
			pblk_map_invalidate(pblk, ppa);

		pblk_ppa_set_empty(&ppa);
		pblk_trans_map_set(pblk, lba, ppa);
	spin_unlock(&pblk->trans_lock);

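/* Allocate the DMA-able metadata buffer for a request; for multi-sector
 * requests the PPA list is carved out of the same allocation.
 */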
int pblk_alloc_rqd_meta(struct pblk *pblk, struct nvm_rq *rqd)
	struct nvm_tgt_dev *dev = pblk->dev;

	rqd->meta_list = nvm_dev_dma_alloc(dev->parent, GFP_KERNEL,
					   &rqd->dma_meta_list);

	if (rqd->nr_ppas == 1)

	rqd->ppa_list = rqd->meta_list + pblk_dma_meta_size(pblk);
	rqd->dma_ppa_list = rqd->dma_meta_list + pblk_dma_meta_size(pblk);

void pblk_free_rqd_meta(struct pblk *pblk, struct nvm_rq *rqd)
	struct nvm_tgt_dev *dev = pblk->dev;

	nvm_dev_dma_free(dev->parent, rqd->meta_list,

/* Caller must guarantee that the request is a valid type */
struct nvm_rq *pblk_alloc_rqd(struct pblk *pblk, int type)
	pool = &pblk->w_rq_pool;
	rq_size = pblk_w_rq_size;
	pool = &pblk->r_rq_pool;
	rq_size = pblk_g_rq_size;
	pool = &pblk->e_rq_pool;
	rq_size = pblk_g_rq_size;

	rqd = mempool_alloc(pool, GFP_KERNEL);
	memset(rqd, 0, rq_size);

/* Typically used on the completion path. Cannot guarantee request consistency */
void pblk_free_rqd(struct pblk *pblk, struct nvm_rq *rqd, int type)
	kfree(((struct pblk_c_ctx *)nvm_rq_to_pdu(rqd))->lun_bitmap);
	pool = &pblk->w_rq_pool;
	pool = &pblk->r_rq_pool;
	pool = &pblk->e_rq_pool;
	pblk_err(pblk, "trying to free unknown rqd type\n");

	pblk_free_rqd_meta(pblk, rqd);
	mempool_free(rqd, pool);

void pblk_bio_free_pages(struct pblk *pblk, struct bio *bio, int off,
	WARN_ON(off + nr_pages != bio->bi_vcnt);

	for (i = off; i < nr_pages + off; i++) {
		bv = bio->bi_io_vec[i];
		mempool_free(bv.bv_page, &pblk->page_bio_pool);

int pblk_bio_add_pages(struct pblk *pblk, struct bio *bio, gfp_t flags,
	struct request_queue *q = pblk->dev->q;

	for (i = 0; i < nr_pages; i++) {
		page = mempool_alloc(&pblk->page_bio_pool, flags);

		ret = bio_add_pc_page(q, bio, page, PBLK_EXPOSED_PAGE_SIZE, 0);
		if (ret != PBLK_EXPOSED_PAGE_SIZE) {
			pblk_err(pblk, "could not add page to bio\n");
			mempool_free(page, &pblk->page_bio_pool);

	pblk_bio_free_pages(pblk, bio, (bio->bi_vcnt - i), i);

void pblk_write_kick(struct pblk *pblk)
	wake_up_process(pblk->writer_ts);
	mod_timer(&pblk->wtimer, jiffies + msecs_to_jiffies(1000));

void pblk_write_timer_fn(struct timer_list *t)
	struct pblk *pblk = from_timer(pblk, t, wtimer);

	/* kick the write thread every tick to flush outstanding data */
	pblk_write_kick(pblk);

void pblk_write_should_kick(struct pblk *pblk)
	unsigned int secs_avail = pblk_rb_read_count(&pblk->rwb);

	if (secs_avail >= pblk->min_write_pgs_data)
		pblk_write_kick(pblk);

static void pblk_wait_for_meta(struct pblk *pblk)
	if (!atomic_read(&pblk->inflight_io))

static void pblk_flush_writer(struct pblk *pblk)
	pblk_rb_flush(&pblk->rwb);
	if (!pblk_rb_sync_count(&pblk->rwb))

	pblk_write_kick(pblk);

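/* Pick the GC list a line belongs to based on its valid sector count
 * (write-error lines and fully invalid lines are treated separately).
 */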
struct list_head *pblk_line_gc_list(struct pblk *pblk, struct pblk_line *line)
	struct pblk_line_meta *lm = &pblk->lm;
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	struct list_head *move_list = NULL;
	int packed_meta = (le32_to_cpu(*line->vsc) / pblk->min_write_pgs_data)
			* (pblk->min_write_pgs - pblk->min_write_pgs_data);
	int vsc = le32_to_cpu(*line->vsc) + packed_meta;

	lockdep_assert_held(&line->lock);

	if (line->w_err_gc->has_write_err) {
		if (line->gc_group != PBLK_LINEGC_WERR) {
			line->gc_group = PBLK_LINEGC_WERR;
			move_list = &l_mg->gc_werr_list;
			pblk_rl_werr_line_in(&pblk->rl);
		if (line->gc_group != PBLK_LINEGC_FULL) {
			line->gc_group = PBLK_LINEGC_FULL;
			move_list = &l_mg->gc_full_list;
	} else if (vsc < lm->high_thrs) {
		if (line->gc_group != PBLK_LINEGC_HIGH) {
			line->gc_group = PBLK_LINEGC_HIGH;
			move_list = &l_mg->gc_high_list;
	} else if (vsc < lm->mid_thrs) {
		if (line->gc_group != PBLK_LINEGC_MID) {
			line->gc_group = PBLK_LINEGC_MID;
			move_list = &l_mg->gc_mid_list;
	} else if (vsc < line->sec_in_line) {
		if (line->gc_group != PBLK_LINEGC_LOW) {
			line->gc_group = PBLK_LINEGC_LOW;
			move_list = &l_mg->gc_low_list;
	} else if (vsc == line->sec_in_line) {
		if (line->gc_group != PBLK_LINEGC_EMPTY) {
			line->gc_group = PBLK_LINEGC_EMPTY;
			move_list = &l_mg->gc_empty_list;
		line->state = PBLK_LINESTATE_CORRUPT;
		trace_pblk_line_state(pblk_disk_name(pblk), line->id,
		line->gc_group = PBLK_LINEGC_NONE;
		move_list = &l_mg->corrupt_list;
		pblk_err(pblk, "corrupted vsc for line %d, vsc:%d (%d/%d/%d)\n",
				lm->high_thrs, lm->mid_thrs);

void pblk_discard(struct pblk *pblk, struct bio *bio)
	sector_t slba = pblk_get_lba(bio);
	sector_t nr_secs = pblk_get_secs(bio);

	pblk_invalidate_range(pblk, slba, nr_secs);

void pblk_log_write_err(struct pblk *pblk, struct nvm_rq *rqd)
	atomic_long_inc(&pblk->write_failed);
#ifdef CONFIG_NVM_PBLK_DEBUG
	pblk_print_failed_rqd(pblk, rqd, rqd->error);

void pblk_log_read_err(struct pblk *pblk, struct nvm_rq *rqd)
	/* Empty page read is not necessarily an error (e.g., L2P recovery) */
	if (rqd->error == NVM_RSP_ERR_EMPTYPAGE) {
		atomic_long_inc(&pblk->read_empty);

	switch (rqd->error) {
	case NVM_RSP_WARN_HIGHECC:
		atomic_long_inc(&pblk->read_high_ecc);
	case NVM_RSP_ERR_FAILECC:
	case NVM_RSP_ERR_FAILCRC:
		atomic_long_inc(&pblk->read_failed);
		pblk_err(pblk, "unknown read error:%d\n", rqd->error);
#ifdef CONFIG_NVM_PBLK_DEBUG
	pblk_print_failed_rqd(pblk, rqd, rqd->error);

void pblk_set_sec_per_write(struct pblk *pblk, int sec_per_write)
	pblk->sec_per_write = sec_per_write;

int pblk_submit_io(struct pblk *pblk, struct nvm_rq *rqd)
	struct nvm_tgt_dev *dev = pblk->dev;

	atomic_inc(&pblk->inflight_io);

#ifdef CONFIG_NVM_PBLK_DEBUG
	if (pblk_check_io(pblk, rqd))

	return nvm_submit_io(dev, rqd);

void pblk_check_chunk_state_update(struct pblk *pblk, struct nvm_rq *rqd)
	struct ppa_addr *ppa_list = nvm_rq_to_ppa_list(rqd);

	for (i = 0; i < rqd->nr_ppas; i++) {
		struct ppa_addr *ppa = &ppa_list[i];
		struct nvm_chk_meta *chunk = pblk_dev_ppa_to_chunk(pblk, *ppa);
		u64 caddr = pblk_dev_ppa_to_chunk_addr(pblk, *ppa);

			trace_pblk_chunk_state(pblk_disk_name(pblk),
					ppa, NVM_CHK_ST_OPEN);
		else if (caddr == (chunk->cnlb - 1))
			trace_pblk_chunk_state(pblk_disk_name(pblk),
					ppa, NVM_CHK_ST_CLOSED);

int pblk_submit_io_sync(struct pblk *pblk, struct nvm_rq *rqd)
	struct nvm_tgt_dev *dev = pblk->dev;

	atomic_inc(&pblk->inflight_io);

#ifdef CONFIG_NVM_PBLK_DEBUG
	if (pblk_check_io(pblk, rqd))

	ret = nvm_submit_io_sync(dev, rqd);

	if (trace_pblk_chunk_state_enabled() && !ret &&
					rqd->opcode == NVM_OP_PWRITE)
		pblk_check_chunk_state_update(pblk, rqd);

int pblk_submit_io_sync_sem(struct pblk *pblk, struct nvm_rq *rqd)
	struct ppa_addr *ppa_list;

	ppa_list = (rqd->nr_ppas > 1) ? rqd->ppa_list : &rqd->ppa_addr;

	pblk_down_chunk(pblk, ppa_list[0]);
	ret = pblk_submit_io_sync(pblk, rqd);
	pblk_up_chunk(pblk, ppa_list[0]);

static void pblk_bio_map_addr_endio(struct bio *bio)

struct bio *pblk_bio_map_addr(struct pblk *pblk, void *data,
			      unsigned int nr_secs, unsigned int len,
			      int alloc_type, gfp_t gfp_mask)
	struct nvm_tgt_dev *dev = pblk->dev;

	if (alloc_type == PBLK_KMALLOC_META)
		return bio_map_kern(dev->q, kaddr, len, gfp_mask);

	bio = bio_kmalloc(gfp_mask, nr_secs);
		return ERR_PTR(-ENOMEM);

	for (i = 0; i < nr_secs; i++) {
		page = vmalloc_to_page(kaddr);
			pblk_err(pblk, "could not map vmalloc bio\n");
			bio = ERR_PTR(-ENOMEM);

		ret = bio_add_pc_page(dev->q, bio, page, PAGE_SIZE, 0);
		if (ret != PAGE_SIZE) {
			pblk_err(pblk, "could not add page to bio\n");
			bio = ERR_PTR(-ENOMEM);

	bio->bi_end_io = pblk_bio_map_addr_endio;

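/* Compute how many sectors to submit: a multiple of the minimum write size,
 * capped by the configured sectors per write.
 */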
int pblk_calc_secs(struct pblk *pblk, unsigned long secs_avail,
		   unsigned long secs_to_flush, bool skip_meta)
	int max = pblk->sec_per_write;
	int min = pblk->min_write_pgs;
	int secs_to_sync = 0;

	if (skip_meta && pblk->min_write_pgs_data != pblk->min_write_pgs)
		min = max = pblk->min_write_pgs_data;

	if (secs_avail >= max)
	else if (secs_avail >= min)
		secs_to_sync = min * (secs_avail / min);
	else if (secs_to_flush)

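/* Roll back the last nr_secs sector allocations on a line by clearing their
 * bits in the map bitmap and rewinding the line's current sector.
 */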
void pblk_dealloc_page(struct pblk *pblk, struct pblk_line *line, int nr_secs)
	spin_lock(&line->lock);
	addr = find_next_zero_bit(line->map_bitmap,
					pblk->lm.sec_per_line, line->cur_sec);
	line->cur_sec = addr - nr_secs;

	for (i = 0; i < nr_secs; i++, line->cur_sec--)
		WARN_ON(!test_and_clear_bit(line->cur_sec, line->map_bitmap));
	spin_unlock(&line->lock);

u64 __pblk_alloc_page(struct pblk *pblk, struct pblk_line *line, int nr_secs)
	lockdep_assert_held(&line->lock);

	/* logic error: ppa out-of-bounds. Prevent generating a bad address */
	if (line->cur_sec + nr_secs > pblk->lm.sec_per_line) {
		WARN(1, "pblk: page allocation out of bounds\n");
		nr_secs = pblk->lm.sec_per_line - line->cur_sec;

	line->cur_sec = addr = find_next_zero_bit(line->map_bitmap,
					pblk->lm.sec_per_line, line->cur_sec);
	for (i = 0; i < nr_secs; i++, line->cur_sec++)
		WARN_ON(test_and_set_bit(line->cur_sec, line->map_bitmap));

u64 pblk_alloc_page(struct pblk *pblk, struct pblk_line *line, int nr_secs)
	/* Lock needed in case a write fails and a recovery needs to remap
	 * failed write buffer entries
	 */
	spin_lock(&line->lock);
	addr = __pblk_alloc_page(pblk, line, nr_secs);
	line->left_msecs -= nr_secs;
	WARN(line->left_msecs < 0, "pblk: page allocation out of bounds\n");
	spin_unlock(&line->lock);

u64 pblk_lookup_page(struct pblk *pblk, struct pblk_line *line)
	spin_lock(&line->lock);
	paddr = find_next_zero_bit(line->map_bitmap,
					pblk->lm.sec_per_line, line->cur_sec);
	spin_unlock(&line->lock);

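/* smeta is written at the start of the first good block of the line */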
u64 pblk_line_smeta_start(struct pblk *pblk, struct pblk_line *line)
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	struct pblk_line_meta *lm = &pblk->lm;

	/* This usually only happens on bad lines */
	bit = find_first_zero_bit(line->blk_bitmap, lm->blk_per_line);
	if (bit >= lm->blk_per_line)

	return bit * geo->ws_opt;

int pblk_line_smeta_read(struct pblk *pblk, struct pblk_line *line)
	struct nvm_tgt_dev *dev = pblk->dev;
	struct pblk_line_meta *lm = &pblk->lm;
	u64 paddr = pblk_line_smeta_start(pblk, line);

	memset(&rqd, 0, sizeof(struct nvm_rq));

	ret = pblk_alloc_rqd_meta(pblk, &rqd);

	bio = bio_map_kern(dev->q, line->smeta, lm->smeta_len, GFP_KERNEL);

	bio->bi_iter.bi_sector = 0; /* internal bio */
	bio_set_op_attrs(bio, REQ_OP_READ, 0);

	rqd.opcode = NVM_OP_PREAD;
	rqd.nr_ppas = lm->smeta_sec;

	for (i = 0; i < lm->smeta_sec; i++, paddr++)
		rqd.ppa_list[i] = addr_to_gen_ppa(pblk, paddr, line->id);

	ret = pblk_submit_io_sync(pblk, &rqd);
		pblk_err(pblk, "smeta I/O submission failed: %d\n", ret);

	atomic_dec(&pblk->inflight_io);

	if (rqd.error && rqd.error != NVM_RSP_WARN_HIGHECC) {
		pblk_log_read_err(pblk, &rqd);

	pblk_free_rqd_meta(pblk, &rqd);

static int pblk_line_smeta_write(struct pblk *pblk, struct pblk_line *line,
	struct nvm_tgt_dev *dev = pblk->dev;
	struct pblk_line_meta *lm = &pblk->lm;
	__le64 *lba_list = emeta_to_lbas(pblk, line->emeta->buf);
	__le64 addr_empty = cpu_to_le64(ADDR_EMPTY);

	memset(&rqd, 0, sizeof(struct nvm_rq));

	ret = pblk_alloc_rqd_meta(pblk, &rqd);

	bio = bio_map_kern(dev->q, line->smeta, lm->smeta_len, GFP_KERNEL);

	bio->bi_iter.bi_sector = 0; /* internal bio */
	bio_set_op_attrs(bio, REQ_OP_WRITE, 0);

	rqd.opcode = NVM_OP_PWRITE;
	rqd.nr_ppas = lm->smeta_sec;

	for (i = 0; i < lm->smeta_sec; i++, paddr++) {
		struct pblk_sec_meta *meta = pblk_get_meta(pblk,

		rqd.ppa_list[i] = addr_to_gen_ppa(pblk, paddr, line->id);
		meta->lba = lba_list[paddr] = addr_empty;

	ret = pblk_submit_io_sync_sem(pblk, &rqd);
		pblk_err(pblk, "smeta I/O submission failed: %d\n", ret);

	atomic_dec(&pblk->inflight_io);

		pblk_log_write_err(pblk, &rqd);

	pblk_free_rqd_meta(pblk, &rqd);

int pblk_line_emeta_read(struct pblk *pblk, struct pblk_line *line,
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	struct pblk_line_meta *lm = &pblk->lm;
	void *ppa_list, *meta_list;
	u64 paddr = line->emeta_ssec;
	dma_addr_t dma_ppa_list, dma_meta_list;
	int min = pblk->min_write_pgs;
	int left_ppas = lm->emeta_sec[0];
	int line_id = line->id;

	meta_list = nvm_dev_dma_alloc(dev->parent, GFP_KERNEL,

	ppa_list = meta_list + pblk_dma_meta_size(pblk);
	dma_ppa_list = dma_meta_list + pblk_dma_meta_size(pblk);

	memset(&rqd, 0, sizeof(struct nvm_rq));

	rq_ppas = pblk_calc_secs(pblk, left_ppas, 0, false);
	rq_len = rq_ppas * geo->csecs;

	bio = pblk_bio_map_addr(pblk, emeta_buf, rq_ppas, rq_len,
					l_mg->emeta_alloc_type, GFP_KERNEL);

	bio->bi_iter.bi_sector = 0; /* internal bio */
	bio_set_op_attrs(bio, REQ_OP_READ, 0);

	rqd.meta_list = meta_list;
	rqd.ppa_list = ppa_list;
	rqd.dma_meta_list = dma_meta_list;
	rqd.dma_ppa_list = dma_ppa_list;
	rqd.opcode = NVM_OP_PREAD;
	rqd.nr_ppas = rq_ppas;

	for (i = 0; i < rqd.nr_ppas; ) {
		struct ppa_addr ppa = addr_to_gen_ppa(pblk, paddr, line_id);
		int pos = pblk_ppa_to_pos(geo, ppa);

		if (pblk_io_aligned(pblk, rq_ppas))

		while (test_bit(pos, line->blk_bitmap)) {
			if (pblk_boundary_paddr_checks(pblk, paddr)) {

			ppa = addr_to_gen_ppa(pblk, paddr, line_id);
			pos = pblk_ppa_to_pos(geo, ppa);

		if (pblk_boundary_paddr_checks(pblk, paddr + min)) {

		for (j = 0; j < min; j++, i++, paddr++)
			rqd.ppa_list[i] = addr_to_gen_ppa(pblk, paddr, line_id);

	ret = pblk_submit_io_sync(pblk, &rqd);
		pblk_err(pblk, "emeta I/O submission failed: %d\n", ret);

	atomic_dec(&pblk->inflight_io);

	if (rqd.error && rqd.error != NVM_RSP_WARN_HIGHECC) {
		pblk_log_read_err(pblk, &rqd);

	left_ppas -= rq_ppas;

	nvm_dev_dma_free(dev->parent, rqd.meta_list, rqd.dma_meta_list);

static void pblk_setup_e_rq(struct pblk *pblk, struct nvm_rq *rqd,
	rqd->opcode = NVM_OP_ERASE;

static int pblk_blk_erase_sync(struct pblk *pblk, struct ppa_addr ppa)
	struct nvm_rq rqd = {NULL};

	trace_pblk_chunk_reset(pblk_disk_name(pblk), &ppa,
				PBLK_CHUNK_RESET_START);

	pblk_setup_e_rq(pblk, &rqd, ppa);

	/* The write thread schedules erases so that it minimizes disturbances
	 * with writes. Thus, there is no need to take the LUN semaphore.
	 */
	ret = pblk_submit_io_sync(pblk, &rqd);
	__pblk_end_io_erase(pblk, &rqd);

int pblk_line_erase(struct pblk *pblk, struct pblk_line *line)
	struct pblk_line_meta *lm = &pblk->lm;

	/* Erase only good blocks, one at a time */
	spin_lock(&line->lock);
	bit = find_next_zero_bit(line->erase_bitmap, lm->blk_per_line,
	if (bit >= lm->blk_per_line) {
		spin_unlock(&line->lock);

	ppa = pblk->luns[bit].bppa; /* set ch and lun */
	ppa.a.blk = line->id;

	atomic_dec(&line->left_eblks);
	WARN_ON(test_and_set_bit(bit, line->erase_bitmap));
	spin_unlock(&line->lock);

	ret = pblk_blk_erase_sync(pblk, ppa);
		pblk_err(pblk, "failed to erase line %d\n", line->id);

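/* Reserve one of the pre-allocated smeta/emeta buffers for this line; if none
 * is free, drop the free lock and try again.
 */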
static void pblk_line_setup_metadata(struct pblk_line *line,
				     struct pblk_line_mgmt *l_mg,
				     struct pblk_line_meta *lm)
	lockdep_assert_held(&l_mg->free_lock);

	meta_line = find_first_zero_bit(&l_mg->meta_bitmap, PBLK_DATA_LINES);
	if (meta_line == PBLK_DATA_LINES) {
		spin_unlock(&l_mg->free_lock);
		spin_lock(&l_mg->free_lock);

	set_bit(meta_line, &l_mg->meta_bitmap);
	line->meta_line = meta_line;

	line->smeta = l_mg->sline_meta[meta_line];
	line->emeta = l_mg->eline_meta[meta_line];

	memset(line->smeta, 0, lm->smeta_len);
	memset(line->emeta->buf, 0, lm->emeta_len[0]);

	line->emeta->mem = 0;
	atomic_set(&line->emeta->sync, 0);

/* For now, lines are always assumed to be full lines. Thus, the smeta former
 * and current lun bitmaps are omitted.
 */
static int pblk_line_init_metadata(struct pblk *pblk, struct pblk_line *line,
				   struct pblk_line *cur)
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	struct pblk_line_meta *lm = &pblk->lm;
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	struct pblk_emeta *emeta = line->emeta;
	struct line_emeta *emeta_buf = emeta->buf;
	struct line_smeta *smeta_buf = (struct line_smeta *)line->smeta;

	/* After erasing the line, new bad blocks might appear and we risk
	 * having an invalid line
	 */
	nr_blk_line = lm->blk_per_line -
			bitmap_weight(line->blk_bitmap, lm->blk_per_line);
	if (nr_blk_line < lm->min_blk_line) {
		spin_lock(&l_mg->free_lock);
		spin_lock(&line->lock);
		line->state = PBLK_LINESTATE_BAD;
		trace_pblk_line_state(pblk_disk_name(pblk), line->id,
		spin_unlock(&line->lock);

		list_add_tail(&line->list, &l_mg->bad_list);
		spin_unlock(&l_mg->free_lock);

		pblk_debug(pblk, "line %d is bad\n", line->id);

	/* Run-time metadata */
	line->lun_bitmap = ((void *)(smeta_buf)) + sizeof(struct line_smeta);

	/* Mark LUNs allocated in this line (all for now) */
	bitmap_set(line->lun_bitmap, 0, lm->lun_bitmap_len);

	smeta_buf->header.identifier = cpu_to_le32(PBLK_MAGIC);
	guid_copy((guid_t *)&smeta_buf->header.uuid, &pblk->instance_uuid);
	smeta_buf->header.id = cpu_to_le32(line->id);
	smeta_buf->header.type = cpu_to_le16(line->type);
	smeta_buf->header.version_major = SMETA_VERSION_MAJOR;
	smeta_buf->header.version_minor = SMETA_VERSION_MINOR;

	/* Start metadata */
	smeta_buf->seq_nr = cpu_to_le64(line->seq_nr);
	smeta_buf->window_wr_lun = cpu_to_le32(geo->all_luns);

	/* Fill metadata among lines */
	memcpy(line->lun_bitmap, cur->lun_bitmap, lm->lun_bitmap_len);
	smeta_buf->prev_id = cpu_to_le32(cur->id);
	cur->emeta->buf->next_id = cpu_to_le32(line->id);
	smeta_buf->prev_id = cpu_to_le32(PBLK_LINE_EMPTY);

	/* All smeta must be set at this point */
	smeta_buf->header.crc = cpu_to_le32(
			pblk_calc_meta_header_crc(pblk, &smeta_buf->header));
	smeta_buf->crc = cpu_to_le32(pblk_calc_smeta_crc(pblk, smeta_buf));

	memcpy(&emeta_buf->header, &smeta_buf->header,
						sizeof(struct line_header));
	emeta_buf->header.version_major = EMETA_VERSION_MAJOR;
	emeta_buf->header.version_minor = EMETA_VERSION_MINOR;
	emeta_buf->header.crc = cpu_to_le32(
			pblk_calc_meta_header_crc(pblk, &emeta_buf->header));

	emeta_buf->seq_nr = cpu_to_le64(line->seq_nr);
	emeta_buf->nr_lbas = cpu_to_le64(line->sec_in_line);
	emeta_buf->nr_valid_lbas = cpu_to_le64(0);
	emeta_buf->next_id = cpu_to_le32(PBLK_LINE_EMPTY);
	emeta_buf->crc = cpu_to_le32(0);
	emeta_buf->prev_id = smeta_buf->prev_id;

static int pblk_line_alloc_bitmaps(struct pblk *pblk, struct pblk_line *line)
	struct pblk_line_meta *lm = &pblk->lm;
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;

	line->map_bitmap = mempool_alloc(l_mg->bitmap_pool, GFP_KERNEL);
	if (!line->map_bitmap)

	memset(line->map_bitmap, 0, lm->sec_bitmap_len);

	/* will be initialized using bb info from map_bitmap */
	line->invalid_bitmap = mempool_alloc(l_mg->bitmap_pool, GFP_KERNEL);
	if (!line->invalid_bitmap) {
		mempool_free(line->map_bitmap, l_mg->bitmap_pool);
		line->map_bitmap = NULL;

/* For now, lines are always assumed to be full lines. Thus, the smeta former
 * and current lun bitmaps are omitted.
 */
static int pblk_line_init_bb(struct pblk *pblk, struct pblk_line *line,
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	struct pblk_line_meta *lm = &pblk->lm;
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;

	line->sec_in_line = lm->sec_per_line;

	/* Capture bad block information on line mapping bitmaps */
	while ((bit = find_next_bit(line->blk_bitmap, lm->blk_per_line,
					bit + 1)) < lm->blk_per_line) {
		off = bit * geo->ws_opt;
		bitmap_shift_left(l_mg->bb_aux, l_mg->bb_template, off,
		bitmap_or(line->map_bitmap, line->map_bitmap, l_mg->bb_aux,
		line->sec_in_line -= geo->clba;

	/* Mark smeta metadata sectors as bad sectors */
	bit = find_first_zero_bit(line->blk_bitmap, lm->blk_per_line);
	off = bit * geo->ws_opt;
	bitmap_set(line->map_bitmap, off, lm->smeta_sec);
	line->sec_in_line -= lm->smeta_sec;
	line->cur_sec = off + lm->smeta_sec;

	if (init && pblk_line_smeta_write(pblk, line, off)) {
		pblk_debug(pblk, "line smeta I/O failed. Retry\n");

	bitmap_copy(line->invalid_bitmap, line->map_bitmap, lm->sec_per_line);

	/* Mark emeta metadata sectors as bad sectors. We need to consider bad
	 * blocks to make sure that there are enough sectors to store emeta
	 */
	emeta_secs = lm->emeta_sec[0];
	off = lm->sec_per_line;
	while (emeta_secs) {
		if (!test_bit(off, line->invalid_bitmap)) {
			bitmap_set(line->invalid_bitmap, off, geo->ws_opt);
			emeta_secs -= geo->ws_opt;

	line->emeta_ssec = off;
	line->sec_in_line -= lm->emeta_sec[0];
	line->nr_valid_lbas = 0;
	line->left_msecs = line->sec_in_line;
	*line->vsc = cpu_to_le32(line->sec_in_line);

	if (lm->sec_per_line - line->sec_in_line !=
		bitmap_weight(line->invalid_bitmap, lm->sec_per_line)) {
		spin_lock(&line->lock);
		line->state = PBLK_LINESTATE_BAD;
		trace_pblk_line_state(pblk_disk_name(pblk), line->id,
		spin_unlock(&line->lock);

		list_add_tail(&line->list, &l_mg->bad_list);
		pblk_err(pblk, "unexpected line %d is bad\n", line->id);

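/* On a line that has never been written, chunks reported free by the device
 * do not need to be erased again.
 */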
static int pblk_prepare_new_line(struct pblk *pblk, struct pblk_line *line)
	struct pblk_line_meta *lm = &pblk->lm;
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	int blk_to_erase = atomic_read(&line->blk_in_line);

	for (i = 0; i < lm->blk_per_line; i++) {
		struct pblk_lun *rlun = &pblk->luns[i];
		int pos = pblk_ppa_to_pos(geo, rlun->bppa);
		int state = line->chks[pos].state;

		/* Free chunks should not be erased */
		if (state & NVM_CHK_ST_FREE) {
			set_bit(pblk_ppa_to_pos(geo, rlun->bppa),
					line->erase_bitmap);

	return blk_to_erase;

static int pblk_line_prepare(struct pblk *pblk, struct pblk_line *line)
	struct pblk_line_meta *lm = &pblk->lm;
	int blk_in_line = atomic_read(&line->blk_in_line);

	/* Bad blocks do not need to be erased */
	bitmap_copy(line->erase_bitmap, line->blk_bitmap, lm->blk_per_line);

	spin_lock(&line->lock);

	/* If we have not written to this line, we need to mark up free chunks
	if (line->state == PBLK_LINESTATE_NEW) {
		blk_to_erase = pblk_prepare_new_line(pblk, line);
		line->state = PBLK_LINESTATE_FREE;
		trace_pblk_line_state(pblk_disk_name(pblk), line->id,
		blk_to_erase = blk_in_line;

	if (blk_in_line < lm->min_blk_line) {
		spin_unlock(&line->lock);

	if (line->state != PBLK_LINESTATE_FREE) {
		WARN(1, "pblk: corrupted line %d, state %d\n",
					line->id, line->state);
		spin_unlock(&line->lock);

	line->state = PBLK_LINESTATE_OPEN;
	trace_pblk_line_state(pblk_disk_name(pblk), line->id,

	atomic_set(&line->left_eblks, blk_to_erase);
	atomic_set(&line->left_seblks, blk_to_erase);

	line->meta_distance = lm->meta_distance;
	spin_unlock(&line->lock);

	kref_init(&line->ref);
	atomic_set(&line->sec_to_update, 0);

/* Line allocations in the recovery path are always single-threaded */
int pblk_line_recov_alloc(struct pblk *pblk, struct pblk_line *line)
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;

	spin_lock(&l_mg->free_lock);
	l_mg->data_line = line;
	list_del(&line->list);

	ret = pblk_line_prepare(pblk, line);
		list_add(&line->list, &l_mg->free_list);
		spin_unlock(&l_mg->free_lock);

	spin_unlock(&l_mg->free_lock);

	ret = pblk_line_alloc_bitmaps(pblk, line);

	if (!pblk_line_init_bb(pblk, line, 0)) {

	pblk_rl_free_lines_dec(&pblk->rl, line, true);

	spin_lock(&l_mg->free_lock);
	list_add(&line->list, &l_mg->free_list);
	spin_unlock(&l_mg->free_lock);

void pblk_line_recov_close(struct pblk *pblk, struct pblk_line *line)
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;

	mempool_free(line->map_bitmap, l_mg->bitmap_pool);
	line->map_bitmap = NULL;

static void pblk_line_reinit(struct pblk_line *line)
	*line->vsc = cpu_to_le32(EMPTY_ENTRY);

	line->map_bitmap = NULL;
	line->invalid_bitmap = NULL;

void pblk_line_free(struct pblk_line *line)
	struct pblk *pblk = line->pblk;
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;

	mempool_free(line->map_bitmap, l_mg->bitmap_pool);
	mempool_free(line->invalid_bitmap, l_mg->bitmap_pool);

	pblk_line_reinit(line);

struct pblk_line *pblk_line_get(struct pblk *pblk)
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	struct pblk_line_meta *lm = &pblk->lm;
	struct pblk_line *line;

	lockdep_assert_held(&l_mg->free_lock);

	if (list_empty(&l_mg->free_list)) {
		pblk_err(pblk, "no free lines\n");

	line = list_first_entry(&l_mg->free_list, struct pblk_line, list);
	list_del(&line->list);
	l_mg->nr_free_lines--;

	bit = find_first_zero_bit(line->blk_bitmap, lm->blk_per_line);
	if (unlikely(bit >= lm->blk_per_line)) {
		spin_lock(&line->lock);
		line->state = PBLK_LINESTATE_BAD;
		trace_pblk_line_state(pblk_disk_name(pblk), line->id,
		spin_unlock(&line->lock);

		list_add_tail(&line->list, &l_mg->bad_list);

		pblk_debug(pblk, "line %d is bad\n", line->id);

	ret = pblk_line_prepare(pblk, line);
		list_add(&line->list, &l_mg->bad_list);
		list_add(&line->list, &l_mg->corrupt_list);
		pblk_err(pblk, "failed to prepare line %d\n", line->id);
		list_add(&line->list, &l_mg->free_list);
		l_mg->nr_free_lines++;

static struct pblk_line *pblk_line_retry(struct pblk *pblk,
					 struct pblk_line *line)
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	struct pblk_line *retry_line;

	spin_lock(&l_mg->free_lock);
	retry_line = pblk_line_get(pblk);
		l_mg->data_line = NULL;
		spin_unlock(&l_mg->free_lock);

	retry_line->map_bitmap = line->map_bitmap;
	retry_line->invalid_bitmap = line->invalid_bitmap;
	retry_line->smeta = line->smeta;
	retry_line->emeta = line->emeta;
	retry_line->meta_line = line->meta_line;

	pblk_line_reinit(line);

	l_mg->data_line = retry_line;
	spin_unlock(&l_mg->free_lock);

	pblk_rl_free_lines_dec(&pblk->rl, line, false);

	if (pblk_line_erase(pblk, retry_line))

static void pblk_set_space_limit(struct pblk *pblk)
	struct pblk_rl *rl = &pblk->rl;

	atomic_set(&rl->rb_space, 0);

struct pblk_line *pblk_line_get_first_data(struct pblk *pblk)
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	struct pblk_line *line;

	spin_lock(&l_mg->free_lock);
	line = pblk_line_get(pblk);
		spin_unlock(&l_mg->free_lock);

	line->seq_nr = l_mg->d_seq_nr++;
	line->type = PBLK_LINETYPE_DATA;
	l_mg->data_line = line;

	pblk_line_setup_metadata(line, l_mg, &pblk->lm);

	/* Allocate next line for preparation */
	l_mg->data_next = pblk_line_get(pblk);
	if (!l_mg->data_next) {
		/* If we cannot get a new line, we need to stop the pipeline.
		 * Only allow as many writes in as we can store safely and then
		pblk_set_space_limit(pblk);

		l_mg->data_next = NULL;
		l_mg->data_next->seq_nr = l_mg->d_seq_nr++;
		l_mg->data_next->type = PBLK_LINETYPE_DATA;
	spin_unlock(&l_mg->free_lock);

	if (pblk_line_alloc_bitmaps(pblk, line))

	if (pblk_line_erase(pblk, line)) {
		line = pblk_line_retry(pblk, line);

	if (!pblk_line_init_metadata(pblk, line, NULL)) {
		line = pblk_line_retry(pblk, line);

	if (!pblk_line_init_bb(pblk, line, 1)) {
		line = pblk_line_retry(pblk, line);

	pblk_rl_free_lines_dec(&pblk->rl, line, true);

void pblk_ppa_to_line_put(struct pblk *pblk, struct ppa_addr ppa)
	struct pblk_line *line;

	line = pblk_ppa_to_line(pblk, ppa);
	kref_put(&line->ref, pblk_line_put_wq);

void pblk_rq_to_line_put(struct pblk *pblk, struct nvm_rq *rqd)
	struct ppa_addr *ppa_list;

	ppa_list = (rqd->nr_ppas > 1) ? rqd->ppa_list : &rqd->ppa_addr;

	for (i = 0; i < rqd->nr_ppas; i++)
		pblk_ppa_to_line_put(pblk, ppa_list[i]);

static void pblk_stop_writes(struct pblk *pblk, struct pblk_line *line)
	lockdep_assert_held(&pblk->l_mg.free_lock);

	pblk_set_space_limit(pblk);
	pblk->state = PBLK_STATE_STOPPING;
	trace_pblk_state(pblk_disk_name(pblk), pblk->state);

static void pblk_line_close_meta_sync(struct pblk *pblk)
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	struct pblk_line_meta *lm = &pblk->lm;
	struct pblk_line *line, *tline;

	spin_lock(&l_mg->close_lock);
	if (list_empty(&l_mg->emeta_list)) {
		spin_unlock(&l_mg->close_lock);

	list_cut_position(&list, &l_mg->emeta_list, l_mg->emeta_list.prev);
	spin_unlock(&l_mg->close_lock);

	list_for_each_entry_safe(line, tline, &list, list) {
		struct pblk_emeta *emeta = line->emeta;

		while (emeta->mem < lm->emeta_len[0]) {
			ret = pblk_submit_meta_io(pblk, line);
				pblk_err(pblk, "sync meta line %d failed (%d)\n",

	pblk_wait_for_meta(pblk);
	flush_workqueue(pblk->close_wq);

void __pblk_pipeline_flush(struct pblk *pblk)
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;

	spin_lock(&l_mg->free_lock);
	if (pblk->state == PBLK_STATE_RECOVERING ||
			pblk->state == PBLK_STATE_STOPPED) {
		spin_unlock(&l_mg->free_lock);
	pblk->state = PBLK_STATE_RECOVERING;
	trace_pblk_state(pblk_disk_name(pblk), pblk->state);
	spin_unlock(&l_mg->free_lock);

	pblk_flush_writer(pblk);
	pblk_wait_for_meta(pblk);

	ret = pblk_recov_pad(pblk);
		pblk_err(pblk, "could not close data on teardown(%d)\n", ret);

	flush_workqueue(pblk->bb_wq);
	pblk_line_close_meta_sync(pblk);

void __pblk_pipeline_stop(struct pblk *pblk)
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;

	spin_lock(&l_mg->free_lock);
	pblk->state = PBLK_STATE_STOPPED;
	trace_pblk_state(pblk_disk_name(pblk), pblk->state);
	l_mg->data_line = NULL;
	l_mg->data_next = NULL;
	spin_unlock(&l_mg->free_lock);

void pblk_pipeline_stop(struct pblk *pblk)
	__pblk_pipeline_flush(pblk);
	__pblk_pipeline_stop(pblk);

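/* Swap the prepared next line in as the current data line and start
 * preparing a new one.
 */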
struct pblk_line *pblk_line_replace_data(struct pblk *pblk)
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	struct pblk_line *cur, *new = NULL;
	unsigned int left_seblks;

	new = l_mg->data_next;

	spin_lock(&l_mg->free_lock);
	cur = l_mg->data_line;
	l_mg->data_line = new;

	pblk_line_setup_metadata(new, l_mg, &pblk->lm);
	spin_unlock(&l_mg->free_lock);

	left_seblks = atomic_read(&new->left_seblks);

		/* If the line is not fully erased, erase it */
		if (atomic_read(&new->left_eblks)) {
			if (pblk_line_erase(pblk, new))

	if (pblk_line_alloc_bitmaps(pblk, new))

	if (!pblk_line_init_metadata(pblk, new, cur)) {
		new = pblk_line_retry(pblk, new);

	if (!pblk_line_init_bb(pblk, new, 1)) {
		new = pblk_line_retry(pblk, new);

	pblk_rl_free_lines_dec(&pblk->rl, new, true);

	/* Allocate next line for preparation */
	spin_lock(&l_mg->free_lock);
	l_mg->data_next = pblk_line_get(pblk);
	if (!l_mg->data_next) {
		/* If we cannot get a new line, we need to stop the pipeline.
		 * Only allow as many writes in as we can store safely and then
		pblk_stop_writes(pblk, new);
		l_mg->data_next = NULL;
		l_mg->data_next->seq_nr = l_mg->d_seq_nr++;
		l_mg->data_next->type = PBLK_LINETYPE_DATA;
	spin_unlock(&l_mg->free_lock);

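/* Last reference to a GC'd line: return it to the free list, unless GC hit
 * errors and the line has to be put back on its original GC list.
 */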
static void __pblk_line_put(struct pblk *pblk, struct pblk_line *line)
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	struct pblk_gc *gc = &pblk->gc;

	spin_lock(&line->lock);
	WARN_ON(line->state != PBLK_LINESTATE_GC);
	if (line->w_err_gc->has_gc_err) {
		spin_unlock(&line->lock);
		pblk_err(pblk, "line %d had errors during GC\n", line->id);
		pblk_put_line_back(pblk, line);
		line->w_err_gc->has_gc_err = 0;

	line->state = PBLK_LINESTATE_FREE;
	trace_pblk_line_state(pblk_disk_name(pblk), line->id,
	line->gc_group = PBLK_LINEGC_NONE;
	pblk_line_free(line);

	if (line->w_err_gc->has_write_err) {
		pblk_rl_werr_line_out(&pblk->rl);
		line->w_err_gc->has_write_err = 0;

	spin_unlock(&line->lock);
	atomic_dec(&gc->pipeline_gc);

	spin_lock(&l_mg->free_lock);
	list_add_tail(&line->list, &l_mg->free_list);
	l_mg->nr_free_lines++;
	spin_unlock(&l_mg->free_lock);

	pblk_rl_free_lines_inc(&pblk->rl, line);

static void pblk_line_put_ws(struct work_struct *work)
	struct pblk_line_ws *line_put_ws = container_of(work,
						struct pblk_line_ws, ws);
	struct pblk *pblk = line_put_ws->pblk;
	struct pblk_line *line = line_put_ws->line;

	__pblk_line_put(pblk, line);
	mempool_free(line_put_ws, &pblk->gen_ws_pool);

void pblk_line_put(struct kref *ref)
	struct pblk_line *line = container_of(ref, struct pblk_line, ref);
	struct pblk *pblk = line->pblk;

	__pblk_line_put(pblk, line);

void pblk_line_put_wq(struct kref *ref)
	struct pblk_line *line = container_of(ref, struct pblk_line, ref);
	struct pblk *pblk = line->pblk;
	struct pblk_line_ws *line_put_ws;

	line_put_ws = mempool_alloc(&pblk->gen_ws_pool, GFP_ATOMIC);

	line_put_ws->pblk = pblk;
	line_put_ws->line = line;
	line_put_ws->priv = NULL;

	INIT_WORK(&line_put_ws->ws, pblk_line_put_ws);
	queue_work(pblk->r_end_wq, &line_put_ws->ws);

int pblk_blk_erase_async(struct pblk *pblk, struct ppa_addr ppa)
	rqd = pblk_alloc_rqd(pblk, PBLK_ERASE);

	pblk_setup_e_rq(pblk, rqd, ppa);

	rqd->end_io = pblk_end_io_erase;
	rqd->private = pblk;

	trace_pblk_chunk_reset(pblk_disk_name(pblk),
				&ppa, PBLK_CHUNK_RESET_START);

	/* The write thread schedules erases so that it minimizes disturbances
	 * with writes. Thus, there is no need to take the LUN semaphore.
	 */
	err = pblk_submit_io(pblk, rqd);
		struct nvm_tgt_dev *dev = pblk->dev;
		struct nvm_geo *geo = &dev->geo;

		pblk_err(pblk, "could not async erase line:%d,blk:%d\n",
					pblk_ppa_to_line_id(ppa),
					pblk_ppa_to_pos(geo, ppa));

struct pblk_line *pblk_line_get_data(struct pblk *pblk)
	return pblk->l_mg.data_line;

/* For now, always erase the next line */
struct pblk_line *pblk_line_get_erase(struct pblk *pblk)
	return pblk->l_mg.data_next;

int pblk_line_is_full(struct pblk_line *line)
	return (line->left_msecs == 0);

static void pblk_line_should_sync_meta(struct pblk *pblk)
	if (pblk_rl_is_limit(&pblk->rl))
		pblk_line_close_meta_sync(pblk);

void pblk_line_close(struct pblk *pblk, struct pblk_line *line)
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	struct pblk_line_meta *lm = &pblk->lm;
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	struct list_head *move_list;

#ifdef CONFIG_NVM_PBLK_DEBUG
	WARN(!bitmap_full(line->map_bitmap, lm->sec_per_line),
				"pblk: corrupt closed line %d\n", line->id);

	spin_lock(&l_mg->free_lock);
	WARN_ON(!test_and_clear_bit(line->meta_line, &l_mg->meta_bitmap));
	spin_unlock(&l_mg->free_lock);

	spin_lock(&l_mg->gc_lock);
	spin_lock(&line->lock);
	WARN_ON(line->state != PBLK_LINESTATE_OPEN);
	line->state = PBLK_LINESTATE_CLOSED;
	move_list = pblk_line_gc_list(pblk, line);
	list_add_tail(&line->list, move_list);

	mempool_free(line->map_bitmap, l_mg->bitmap_pool);
	line->map_bitmap = NULL;

	for (i = 0; i < lm->blk_per_line; i++) {
		struct pblk_lun *rlun = &pblk->luns[i];
		int pos = pblk_ppa_to_pos(geo, rlun->bppa);
		int state = line->chks[pos].state;

		if (!(state & NVM_CHK_ST_OFFLINE))
			state = NVM_CHK_ST_CLOSED;

	spin_unlock(&line->lock);
	spin_unlock(&l_mg->gc_lock);

	trace_pblk_line_state(pblk_disk_name(pblk), line->id,

void pblk_line_close_meta(struct pblk *pblk, struct pblk_line *line)
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	struct pblk_line_meta *lm = &pblk->lm;
	struct pblk_emeta *emeta = line->emeta;
	struct line_emeta *emeta_buf = emeta->buf;
	struct wa_counters *wa = emeta_to_wa(lm, emeta_buf);

	/* No need for exact vsc value; avoid a big line lock and take an
	 * approximation.
	 */
	memcpy(emeta_to_vsc(pblk, emeta_buf), l_mg->vsc_list, lm->vsc_list_len);
	memcpy(emeta_to_bb(emeta_buf), line->blk_bitmap, lm->blk_bitmap_len);

	wa->user = cpu_to_le64(atomic64_read(&pblk->user_wa));
	wa->pad = cpu_to_le64(atomic64_read(&pblk->pad_wa));
	wa->gc = cpu_to_le64(atomic64_read(&pblk->gc_wa));

	if (le32_to_cpu(emeta_buf->header.identifier) != PBLK_MAGIC) {
		emeta_buf->header.identifier = cpu_to_le32(PBLK_MAGIC);
		guid_copy((guid_t *)&emeta_buf->header.uuid,
							&pblk->instance_uuid);
		emeta_buf->header.id = cpu_to_le32(line->id);
		emeta_buf->header.type = cpu_to_le16(line->type);
		emeta_buf->header.version_major = EMETA_VERSION_MAJOR;
		emeta_buf->header.version_minor = EMETA_VERSION_MINOR;
		emeta_buf->header.crc = cpu_to_le32(
			pblk_calc_meta_header_crc(pblk, &emeta_buf->header));

	emeta_buf->nr_valid_lbas = cpu_to_le64(line->nr_valid_lbas);
	emeta_buf->crc = cpu_to_le32(pblk_calc_emeta_crc(pblk, emeta_buf));

	spin_lock(&l_mg->close_lock);
	spin_lock(&line->lock);

	/* Update the in-memory start address for emeta, in case it has
	 * shifted due to write errors
	 */
	if (line->emeta_ssec != line->cur_sec)
		line->emeta_ssec = line->cur_sec;

	list_add_tail(&line->list, &l_mg->emeta_list);
	spin_unlock(&line->lock);
	spin_unlock(&l_mg->close_lock);

	pblk_line_should_sync_meta(pblk);

static void pblk_save_lba_list(struct pblk *pblk, struct pblk_line *line)
	struct pblk_line_meta *lm = &pblk->lm;
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	unsigned int lba_list_size = lm->emeta_len[2];
	struct pblk_w_err_gc *w_err_gc = line->w_err_gc;
	struct pblk_emeta *emeta = line->emeta;

	w_err_gc->lba_list = pblk_malloc(lba_list_size,
					 l_mg->emeta_alloc_type, GFP_KERNEL);
	memcpy(w_err_gc->lba_list, emeta_to_lbas(pblk, emeta->buf),

void pblk_line_close_ws(struct work_struct *work)
	struct pblk_line_ws *line_ws = container_of(work, struct pblk_line_ws,
	struct pblk *pblk = line_ws->pblk;
	struct pblk_line *line = line_ws->line;
	struct pblk_w_err_gc *w_err_gc = line->w_err_gc;

	/* Write errors make the emeta start address stored in smeta invalid,
	 * so keep a copy of the lba list until we've gc'd the line
	 */
	if (w_err_gc->has_write_err)
		pblk_save_lba_list(pblk, line);

	pblk_line_close(pblk, line);
	mempool_free(line_ws, &pblk->gen_ws_pool);

void pblk_gen_run_ws(struct pblk *pblk, struct pblk_line *line, void *priv,
		     void (*work)(struct work_struct *), gfp_t gfp_mask,
		     struct workqueue_struct *wq)
	struct pblk_line_ws *line_ws;

	line_ws = mempool_alloc(&pblk->gen_ws_pool, gfp_mask);

	line_ws->pblk = pblk;
	line_ws->line = line;
	line_ws->priv = priv;

	INIT_WORK(&line_ws->ws, work);
	queue_work(wq, &line_ws->ws);

static void __pblk_down_chunk(struct pblk *pblk, int pos)
	struct pblk_lun *rlun = &pblk->luns[pos];

	/*
	 * Only send one inflight I/O per LUN. Since we map at a page
	 * granularity, all ppas in the I/O will map to the same LUN
	 */
	ret = down_timeout(&rlun->wr_sem, msecs_to_jiffies(30000));
	if (ret == -ETIME || ret == -EINTR)
		pblk_err(pblk, "taking lun semaphore timed out: err %d\n",

void pblk_down_chunk(struct pblk *pblk, struct ppa_addr ppa)
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	int pos = pblk_ppa_to_pos(geo, ppa);

	__pblk_down_chunk(pblk, pos);

void pblk_down_rq(struct pblk *pblk, struct ppa_addr ppa,
		  unsigned long *lun_bitmap)
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	int pos = pblk_ppa_to_pos(geo, ppa);

	/* If the LUN has been locked for this same request, do not attempt to
	if (test_and_set_bit(pos, lun_bitmap))

	__pblk_down_chunk(pblk, pos);

void pblk_up_chunk(struct pblk *pblk, struct ppa_addr ppa)
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	struct pblk_lun *rlun;
	int pos = pblk_ppa_to_pos(geo, ppa);

	rlun = &pblk->luns[pos];

void pblk_up_rq(struct pblk *pblk, unsigned long *lun_bitmap)
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	struct pblk_lun *rlun;
	int num_lun = geo->all_luns;

	while ((bit = find_next_bit(lun_bitmap, num_lun, bit + 1)) < num_lun) {
		rlun = &pblk->luns[bit];

void pblk_update_map(struct pblk *pblk, sector_t lba, struct ppa_addr ppa)
	struct ppa_addr ppa_l2p;

	/* logic error: lba out-of-bounds. Ignore update */
	if (!(lba < pblk->capacity)) {
		WARN(1, "pblk: corrupted L2P map request\n");

	spin_lock(&pblk->trans_lock);
	ppa_l2p = pblk_trans_map_get(pblk, lba);

	if (!pblk_addr_in_cache(ppa_l2p) && !pblk_ppa_empty(ppa_l2p))
		pblk_map_invalidate(pblk, ppa_l2p);

	pblk_trans_map_set(pblk, lba, ppa);
	spin_unlock(&pblk->trans_lock);

void pblk_update_map_cache(struct pblk *pblk, sector_t lba, struct ppa_addr ppa)
#ifdef CONFIG_NVM_PBLK_DEBUG
	/* Callers must ensure that the ppa points to a cache address */
	BUG_ON(!pblk_addr_in_cache(ppa));
	BUG_ON(pblk_rb_pos_oob(&pblk->rwb, pblk_addr_to_cacheline(ppa)));

	pblk_update_map(pblk, lba, ppa);

int pblk_update_map_gc(struct pblk *pblk, sector_t lba, struct ppa_addr ppa_new,
		       struct pblk_line *gc_line, u64 paddr_gc)
	struct ppa_addr ppa_l2p, ppa_gc;

#ifdef CONFIG_NVM_PBLK_DEBUG
	/* Callers must ensure that the ppa points to a cache address */
	BUG_ON(!pblk_addr_in_cache(ppa_new));
	BUG_ON(pblk_rb_pos_oob(&pblk->rwb, pblk_addr_to_cacheline(ppa_new)));

	/* logic error: lba out-of-bounds. Ignore update */
	if (!(lba < pblk->capacity)) {
		WARN(1, "pblk: corrupted L2P map request\n");

	spin_lock(&pblk->trans_lock);
	ppa_l2p = pblk_trans_map_get(pblk, lba);
	ppa_gc = addr_to_gen_ppa(pblk, paddr_gc, gc_line->id);

	if (!pblk_ppa_comp(ppa_l2p, ppa_gc)) {
		spin_lock(&gc_line->lock);
		WARN(!test_bit(paddr_gc, gc_line->invalid_bitmap),
						"pblk: corrupted GC update");
		spin_unlock(&gc_line->lock);

	pblk_trans_map_set(pblk, lba, ppa_new);

	spin_unlock(&pblk->trans_lock);

void pblk_update_map_dev(struct pblk *pblk, sector_t lba,
			 struct ppa_addr ppa_mapped, struct ppa_addr ppa_cache)
	struct ppa_addr ppa_l2p;

#ifdef CONFIG_NVM_PBLK_DEBUG
	/* Callers must ensure that the ppa points to a device address */
	BUG_ON(pblk_addr_in_cache(ppa_mapped));

	/* Invalidate and discard padded entries */
	if (lba == ADDR_EMPTY) {
		atomic64_inc(&pblk->pad_wa);
#ifdef CONFIG_NVM_PBLK_DEBUG
		atomic_long_inc(&pblk->padded_wb);
		if (!pblk_ppa_empty(ppa_mapped))
			pblk_map_invalidate(pblk, ppa_mapped);

	/* logic error: lba out-of-bounds. Ignore update */
	if (!(lba < pblk->capacity)) {
		WARN(1, "pblk: corrupted L2P map request\n");

	spin_lock(&pblk->trans_lock);
	ppa_l2p = pblk_trans_map_get(pblk, lba);

	/* Do not update L2P if the cacheline has been updated. In this case,
	 * the mapped ppa must be invalidated
	 */
	if (!pblk_ppa_comp(ppa_l2p, ppa_cache)) {
		if (!pblk_ppa_empty(ppa_mapped))
			pblk_map_invalidate(pblk, ppa_mapped);

#ifdef CONFIG_NVM_PBLK_DEBUG
	WARN_ON(!pblk_addr_in_cache(ppa_l2p) && !pblk_ppa_empty(ppa_l2p));

	pblk_trans_map_set(pblk, lba, ppa_mapped);

	spin_unlock(&pblk->trans_lock);

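/* Look up a sequential LBA range in the L2P table; a line reference is taken
 * for every entry that maps to a device address.
 */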
void pblk_lookup_l2p_seq(struct pblk *pblk, struct ppa_addr *ppas,
			 sector_t blba, int nr_secs)
	spin_lock(&pblk->trans_lock);
	for (i = 0; i < nr_secs; i++) {
		struct ppa_addr ppa;

		ppa = ppas[i] = pblk_trans_map_get(pblk, blba + i);

		/* If the L2P entry maps to a line, the reference is valid */
		if (!pblk_ppa_empty(ppa) && !pblk_addr_in_cache(ppa)) {
			struct pblk_line *line = pblk_ppa_to_line(pblk, ppa);

			kref_get(&line->ref);

	spin_unlock(&pblk->trans_lock);

void pblk_lookup_l2p_rand(struct pblk *pblk, struct ppa_addr *ppas,
			  u64 *lba_list, int nr_secs)
	spin_lock(&pblk->trans_lock);
	for (i = 0; i < nr_secs; i++) {
		if (lba != ADDR_EMPTY) {
			/* logic error: lba out-of-bounds. Ignore update */
			if (!(lba < pblk->capacity)) {
				WARN(1, "pblk: corrupted L2P map request\n");

			ppas[i] = pblk_trans_map_get(pblk, lba);

	spin_unlock(&pblk->trans_lock);

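/* Return the buffer that holds per-sector metadata for a write: the OOB
 * metadata area if supported, otherwise the packed-metadata page at the end
 * of the bio.
 */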
void *pblk_get_meta_for_writes(struct pblk *pblk, struct nvm_rq *rqd)
	if (pblk_is_oob_meta_supported(pblk)) {
		/* Just use OOB metadata buffer as always */
		buffer = rqd->meta_list;
		/* We need to reuse the last page of the request (packed
		 * metadata) in a similar way to traditional OOB metadata
		 */
		buffer = page_to_virt(
			rqd->bio->bi_io_vec[rqd->bio->bi_vcnt - 1].bv_page);

void pblk_get_packed_meta(struct pblk *pblk, struct nvm_rq *rqd)
	void *meta_list = rqd->meta_list;

	if (pblk_is_oob_meta_supported(pblk))

	page = page_to_virt(rqd->bio->bi_io_vec[rqd->bio->bi_vcnt - 1].bv_page);
	/* We need to fill the OOB meta buffer with data from the packed
	 * metadata
	 */
	for (; i < rqd->nr_ppas; i++)
		memcpy(pblk_get_meta(pblk, meta_list, i),
		       page + (i * sizeof(struct pblk_sec_meta)),
		       sizeof(struct pblk_sec_meta));