Merge tag 'mfd-next-5.3' of git://git.kernel.org/pub/scm/linux/kernel/git/lee/mfd
[linux-2.6-block.git] / drivers / lightnvm / pblk-map.c
CommitLineData
02a1520d 1// SPDX-License-Identifier: GPL-2.0
a4bd217b
JG
2/*
3 * Copyright (C) 2016 CNEX Labs
4 * Initial release: Javier Gonzalez <javier@cnexlabs.com>
5 * Matias Bjorling <matias@cnexlabs.com>
6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License version
9 * 2 as published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful, but
12 * WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * General Public License for more details.
15 *
16 * pblk-map.c - pblk's lba-ppa mapping strategy
17 *
18 */
19
20#include "pblk.h"
21
2deeefc0
JG
/*
 * pblk_map_page_data - map one write page worth of sectors to physical
 * addresses on the current data line.
 * @pblk:	pblk instance
 * @sentry:	start entry in the write buffer (ring) for this page
 * @ppa_list:	out: one generic ppa per sector of the page
 * @lun_bitmap:	bitmap of LUNs touched by the parent request
 * @meta_list:	out-of-band (per-sector) metadata area for this page
 * @valid_secs:	number of sectors carrying user data; the remainder of the
 *		page (up to pblk->min_write_pgs) is padded with ADDR_EMPTY
 *
 * Return: 0 on success, -ENOSPC if no data line is (or can be made) available.
 */
static int pblk_map_page_data(struct pblk *pblk, unsigned int sentry,
			      struct ppa_addr *ppa_list,
			      unsigned long *lun_bitmap,
			      void *meta_list,
			      unsigned int valid_secs)
{
	struct pblk_line *line = pblk_line_get_data(pblk);
	struct pblk_emeta *emeta;
	struct pblk_w_ctx *w_ctx;
	__le64 *lba_list;
	u64 paddr;
	int nr_secs = pblk->min_write_pgs;
	int i;

	if (!line)
		return -ENOSPC;

	if (pblk_line_is_full(line)) {
		struct pblk_line *prev_line = line;

		/* If we cannot allocate a new line, make sure to store metadata
		 * on current line and then fail
		 */
		line = pblk_line_replace_data(pblk);
		pblk_line_close_meta(pblk, prev_line);

		if (!line) {
			/* Out of lines entirely: stop the write pipeline */
			pblk_pipeline_stop(pblk);
			return -ENOSPC;
		}

	}

	emeta = line->emeta;
	lba_list = emeta_to_lbas(pblk, emeta->buf);

	/* Reserve nr_secs consecutive sectors (paddrs) on the line */
	paddr = pblk_alloc_page(pblk, line, nr_secs);

	for (i = 0; i < nr_secs; i++, paddr++) {
		struct pblk_sec_meta *meta = pblk_get_meta(pblk, meta_list, i);
		__le64 addr_empty = cpu_to_le64(ADDR_EMPTY);

		/* ppa to be sent to the device */
		ppa_list[i] = addr_to_gen_ppa(pblk, paddr, line->id);

		/* Write context for target bio completion on write buffer. Note
		 * that the write buffer is protected by the sync backpointer,
		 * and a single writer thread have access to each specific entry
		 * at a time. Thus, it is safe to modify the context for the
		 * entry we are setting up for submission without taking any
		 * lock or memory barrier.
		 */
		if (i < valid_secs) {
			/* Valid sector: pin the line until the write completes
			 * and account the in-flight sector update.
			 */
			kref_get(&line->ref);
			atomic_inc(&line->sec_to_update);
			w_ctx = pblk_rb_w_ctx(&pblk->rwb, sentry + i);
			w_ctx->ppa = ppa_list[i];
			/* Record the lba both in the OOB area and in the
			 * line's emeta lba list (used on recovery).
			 */
			meta->lba = cpu_to_le64(w_ctx->lba);
			lba_list[paddr] = cpu_to_le64(w_ctx->lba);
			if (lba_list[paddr] != addr_empty)
				line->nr_valid_lbas++;
			else
				/* Entry was a pad written through the buffer */
				atomic64_inc(&pblk->pad_wa);
		} else {
			/* Padding sector: mark empty and invalidate on line */
			lba_list[paddr] = addr_empty;
			meta->lba = addr_empty;
			__pblk_map_invalidate(pblk, line, paddr);
		}
	}

	/* Take the per-LUN write semaphore(s) for this page's ppas —
	 * released on write completion (see pblk_down_rq users).
	 */
	pblk_down_rq(pblk, ppa_list[0], lun_bitmap);
	return 0;
}
95
525f7bb2 96int pblk_map_rq(struct pblk *pblk, struct nvm_rq *rqd, unsigned int sentry,
a4bd217b
JG
97 unsigned long *lun_bitmap, unsigned int valid_secs,
98 unsigned int off)
99{
55d8ec35 100 void *meta_list = pblk_get_meta_for_writes(pblk, rqd);
faa79f27 101 void *meta_buffer;
d68a9344 102 struct ppa_addr *ppa_list = nvm_rq_to_ppa_list(rqd);
a4bd217b
JG
103 unsigned int map_secs;
104 int min = pblk->min_write_pgs;
105 int i;
525f7bb2 106 int ret;
a4bd217b
JG
107
108 for (i = off; i < rqd->nr_ppas; i += min) {
109 map_secs = (i + min > valid_secs) ? (valid_secs % min) : min;
faa79f27 110 meta_buffer = pblk_get_meta(pblk, meta_list, i);
525f7bb2
HH
111
112 ret = pblk_map_page_data(pblk, sentry + i, &ppa_list[i],
faa79f27 113 lun_bitmap, meta_buffer, map_secs);
525f7bb2
HH
114 if (ret)
115 return ret;
a4bd217b 116 }
525f7bb2
HH
117
118 return 0;
a4bd217b
JG
119}
120
/* only if erase_ppa is set, acquire erase semaphore */
/*
 * pblk_map_erase_rq - map a write request and, on the side, pick one block
 * on the erase line that still needs erasing.
 * @pblk:	pblk instance
 * @rqd:	the nvm request to map
 * @sentry:	start entry in the write buffer for this request
 * @lun_bitmap:	bitmap of LUNs touched by the request
 * @valid_secs:	number of sectors carrying user data in the request
 * @erase_ppa:	out: ppa of a block to erase, or left as-is (empty) if the
 *		erase line has no pending block on the LUNs this request hits
 *
 * At most one erase block is claimed per call (the erase_bitmap bit is set
 * and left_eblks decremented under e_line->lock before returning). If no
 * block was claimed along the mapped ppas, a bad block of the current data
 * line is considered instead, since it might not be bad on the next line.
 *
 * Return: 0 on success, -ENOSPC if no line is available.
 */
int pblk_map_erase_rq(struct pblk *pblk, struct nvm_rq *rqd,
		unsigned int sentry, unsigned long *lun_bitmap,
		unsigned int valid_secs, struct ppa_addr *erase_ppa)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	struct pblk_line_meta *lm = &pblk->lm;
	void *meta_list = pblk_get_meta_for_writes(pblk, rqd);
	void *meta_buffer;
	struct ppa_addr *ppa_list = nvm_rq_to_ppa_list(rqd);
	struct pblk_line *e_line, *d_line;
	unsigned int map_secs;
	int min = pblk->min_write_pgs;
	int i, erase_lun;
	int ret;


	for (i = 0; i < rqd->nr_ppas; i += min) {
		/* Only the last write page may be partially valid */
		map_secs = (i + min > valid_secs) ? (valid_secs % min) : min;
		meta_buffer = pblk_get_meta(pblk, meta_list, i);

		ret = pblk_map_page_data(pblk, sentry + i, &ppa_list[i],
					lun_bitmap, meta_buffer, map_secs);
		if (ret)
			return ret;

		/* LUN position of the page just mapped */
		erase_lun = pblk_ppa_to_pos(geo, ppa_list[i]);

		/* line can change after page map. We might also be writing the
		 * last line.
		 */
		e_line = pblk_line_get_erase(pblk);
		if (!e_line)
			/* No erase line: map the rest without erase selection */
			return pblk_map_rq(pblk, rqd, sentry, lun_bitmap,
							valid_secs, i + min);

		spin_lock(&e_line->lock);
		if (!test_bit(erase_lun, e_line->erase_bitmap)) {
			/* Claim this LUN's block on the erase line */
			set_bit(erase_lun, e_line->erase_bitmap);
			atomic_dec(&e_line->left_eblks);

			*erase_ppa = ppa_list[i];
			erase_ppa->a.blk = e_line->id;
			erase_ppa->a.reserved = 0;

			spin_unlock(&e_line->lock);

			/* Avoid evaluating e_line->left_eblks */
			return pblk_map_rq(pblk, rqd, sentry, lun_bitmap,
							valid_secs, i + min);
		}
		spin_unlock(&e_line->lock);
	}

	d_line = pblk_line_get_data(pblk);

	/* line can change after page map. We might also be writing the
	 * last line.
	 */
	e_line = pblk_line_get_erase(pblk);
	if (!e_line)
		return -ENOSPC;

	/* Erase blocks that are bad in this line but might not be in next */
	if (unlikely(pblk_ppa_empty(*erase_ppa)) &&
			bitmap_weight(d_line->blk_bitmap, lm->blk_per_line)) {
		int bit = -1;

retry:
		/* Find a bad block of the data line not yet claimed for
		 * erase on the erase line; racing claimers force a retry.
		 */
		bit = find_next_bit(d_line->blk_bitmap,
						lm->blk_per_line, bit + 1);
		if (bit >= lm->blk_per_line)
			return 0;

		spin_lock(&e_line->lock);
		if (test_bit(bit, e_line->erase_bitmap)) {
			spin_unlock(&e_line->lock);
			goto retry;
		}
		spin_unlock(&e_line->lock);

		set_bit(bit, e_line->erase_bitmap);
		atomic_dec(&e_line->left_eblks);
		*erase_ppa = pblk->luns[bit].bppa; /* set ch and lun */
		erase_ppa->a.blk = e_line->id;
	}

	return 0;
}