lightnvm: pblk: delete redundant debug line stat
drivers/lightnvm/pblk-map.c
/*
 * Copyright (C) 2016 CNEX Labs
 * Initial release: Javier Gonzalez <javier@cnexlabs.com>
 *                  Matias Bjorling <matias@cnexlabs.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * pblk-map.c - pblk's lba-ppa mapping strategy
 *
 */

#include "pblk.h"

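/*
 * Map one full write page (pblk->min_write_pgs sectors), starting at write
 * buffer entry sentry, onto the current data line. Entries beyond valid_secs
 * are padded with ADDR_EMPTY and invalidated.
 */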
static void pblk_map_page_data(struct pblk *pblk, unsigned int sentry,
			       struct ppa_addr *ppa_list,
			       unsigned long *lun_bitmap,
			       struct pblk_sec_meta *meta_list,
			       unsigned int valid_secs)
{
	struct pblk_line *line = pblk_line_get_data(pblk);
	struct pblk_emeta *emeta = line->emeta;
	struct pblk_w_ctx *w_ctx;
	__le64 *lba_list = emeta_to_lbas(pblk, emeta->buf);
	u64 paddr;
	int nr_secs = pblk->min_write_pgs;
	int i;

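	/* Get the start paddr for the next nr_secs sectors on the line */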
	paddr = pblk_alloc_page(pblk, line, nr_secs);

	for (i = 0; i < nr_secs; i++, paddr++) {
		/* ppa to be sent to the device */
		ppa_list[i] = addr_to_gen_ppa(pblk, paddr, line->id);

		/* Write context for target bio completion on write buffer. Note
		 * that the write buffer is protected by the sync backpointer,
		 * and a single writer thread has access to each specific entry
		 * at a time. Thus, it is safe to modify the context for the
		 * entry we are setting up for submission without taking any
		 * lock or memory barrier.
		 */
		if (i < valid_secs) {
			kref_get(&line->ref);
			w_ctx = pblk_rb_w_ctx(&pblk->rwb, sentry + i);
			w_ctx->ppa = ppa_list[i];
			meta_list[i].lba = cpu_to_le64(w_ctx->lba);
			lba_list[paddr] = cpu_to_le64(w_ctx->lba);
			line->nr_valid_lbas++;
		} else {
			__le64 addr_empty = cpu_to_le64(ADDR_EMPTY);

			lba_list[paddr] = meta_list[i].lba = addr_empty;
			pblk_map_pad_invalidate(pblk, line, paddr);
		}
	}

	if (pblk_line_is_full(line)) {
		struct pblk_line *prev_line = line;

		line = pblk_line_replace_data(pblk);
		if (!line)
			return;
		pblk_line_close_meta(pblk, prev_line);
	}

	pblk_down_rq(pblk, ppa_list, nr_secs, lun_bitmap);
}

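/*
 * Map the sectors of a write request onto the current data line, one write
 * page (pblk->min_write_pgs sectors) at a time, starting at sector offset
 * off within the request.
 */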
void pblk_map_rq(struct pblk *pblk, struct nvm_rq *rqd, unsigned int sentry,
		 unsigned long *lun_bitmap, unsigned int valid_secs,
		 unsigned int off)
{
	struct pblk_sec_meta *meta_list = rqd->meta_list;
	unsigned int map_secs;
	int min = pblk->min_write_pgs;
	int i;

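	/* Map in chunks of min sectors; the last chunk may hold fewer valid
	 * sectors, which pblk_map_page_data() pads out.
	 */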
	for (i = off; i < rqd->nr_ppas; i += min) {
		map_secs = (i + min > valid_secs) ? (valid_secs % min) : min;
		pblk_map_page_data(pblk, sentry + i, &rqd->ppa_list[i],
					lun_bitmap, &meta_list[i], map_secs);
	}
}

/*
 * Map a write request like pblk_map_rq(), but also pick, when possible, one
 * block on the line under erase and return it in erase_ppa so that an erase
 * can be scheduled along with the write.
 *
 * Only if erase_ppa is set, acquire erase semaphore.
 */
void pblk_map_erase_rq(struct pblk *pblk, struct nvm_rq *rqd,
		       unsigned int sentry, unsigned long *lun_bitmap,
		       unsigned int valid_secs, struct ppa_addr *erase_ppa)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	struct pblk_line_meta *lm = &pblk->lm;
	struct pblk_sec_meta *meta_list = rqd->meta_list;
	struct pblk_line *e_line, *d_line;
	unsigned int map_secs;
	int min = pblk->min_write_pgs;
	int i, erase_lun;

	for (i = 0; i < rqd->nr_ppas; i += min) {
		map_secs = (i + min > valid_secs) ? (valid_secs % min) : min;
		pblk_map_page_data(pblk, sentry + i, &rqd->ppa_list[i],
					lun_bitmap, &meta_list[i], map_secs);

		/* line can change after page map */
		e_line = pblk_line_get_erase(pblk);
		erase_lun = pblk_ppa_to_pos(geo, rqd->ppa_list[i]);

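		/* If this LUN's block on the erase line is not yet scheduled
		 * for erase, claim it, return it in erase_ppa and let
		 * pblk_map_rq() finish mapping the rest of the request.
		 */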
		spin_lock(&e_line->lock);
		if (!test_bit(erase_lun, e_line->erase_bitmap)) {
			set_bit(erase_lun, e_line->erase_bitmap);
			atomic_dec(&e_line->left_eblks);

			*erase_ppa = rqd->ppa_list[i];
			erase_ppa->g.blk = e_line->id;

			spin_unlock(&e_line->lock);

			/* Avoid evaluating e_line->left_eblks */
			return pblk_map_rq(pblk, rqd, sentry, lun_bitmap,
							valid_secs, i + min);
		}
		spin_unlock(&e_line->lock);
	}

	e_line = pblk_line_get_erase(pblk);
	d_line = pblk_line_get_data(pblk);

	/* Erase blocks that are bad in this line but might not be in next */
	if (unlikely(ppa_empty(*erase_ppa)) &&
			bitmap_weight(d_line->blk_bitmap, lm->blk_per_line)) {
		int bit = -1;

retry:
		bit = find_next_bit(d_line->blk_bitmap,
						lm->blk_per_line, bit + 1);
		if (bit >= lm->blk_per_line)
			return;

		spin_lock(&e_line->lock);
		if (test_bit(bit, e_line->erase_bitmap)) {
			spin_unlock(&e_line->lock);
			goto retry;
		}
		spin_unlock(&e_line->lock);

		set_bit(bit, e_line->erase_bitmap);
		atomic_dec(&e_line->left_eblks);
		*erase_ppa = pblk->luns[bit].bppa; /* set ch and lun */
		erase_ppa->g.blk = e_line->id;
	}
}