Commit | Line | Data |
---|---|---|
02a1520d | 1 | // SPDX-License-Identifier: GPL-2.0 |
a4bd217b JG |
2 | /* |
3 | * Copyright (C) 2015 IT University of Copenhagen (rrpc.c) | |
4 | * Copyright (C) 2016 CNEX Labs | |
5 | * Initial release: Javier Gonzalez <javier@cnexlabs.com> | |
6 | * Matias Bjorling <matias@cnexlabs.com> | |
7 | * | |
8 | * This program is free software; you can redistribute it and/or | |
9 | * modify it under the terms of the GNU General Public License version | |
10 | * 2 as published by the Free Software Foundation. | |
11 | * | |
12 | * This program is distributed in the hope that it will be useful, but | |
13 | * WITHOUT ANY WARRANTY; without even the implied warranty of | |
14 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | |
15 | * General Public License for more details. | |
16 | * | |
17 | * Implementation of a physical block-device target for Open-channel SSDs. | |
18 | * | |
19 | * pblk-init.c - pblk's initialization. | |
20 | */ | |
21 | ||
22 | #include "pblk.h" | |
4c44abf4 | 23 | #include "pblk-trace.h" |
a4bd217b | 24 | |
/* Optional user override (in entries) for the write buffer size; when 0
 * (the default) the size is derived from the device geometry instead.
 */
static unsigned int write_buffer_size;

module_param(write_buffer_size, uint, 0644);
MODULE_PARM_DESC(write_buffer_size, "number of entries in a write buffer");
29 | ||
/* Slab caches shared across all pblk instances. The kref counts live
 * instances; the mutex keeps cache creation/destruction consistent with
 * the kref value.
 */
struct pblk_global_caches {
	struct kmem_cache *ws;		/* line work-struct cache */
	struct kmem_cache *rec;		/* recovery context cache */
	struct kmem_cache *g_rq;	/* generic (read/erase) request cache */
	struct kmem_cache *w_rq;	/* write request cache */

	struct kref kref;

	struct mutex mutex; /* Ensures consistency between
			     * caches and kref
			     */
};

/* kref starts at 0: caches are created lazily on first instance init. */
static struct pblk_global_caches pblk_caches = {
	.mutex = __MUTEX_INITIALIZER(pblk_caches.mutex),
	.kref = KREF_INIT(0),
};

/* Shared bio_set for pblk-internal bios. */
struct bio_set pblk_bio_set;
a4bd217b JG |
49 | |
/* Route a user bio to the read path or the write cache.
 * Returns an NVM_IO_* status consumed by pblk_make_rq().
 */
static int pblk_rw_io(struct request_queue *q, struct pblk *pblk,
			  struct bio *bio)
{
	int ret;

	/* Read requests must be <= 256kb due to NVMe's 64 bit completion bitmap
	 * constraint. Writes can be of arbitrary size.
	 */
	if (bio_data_dir(bio) == READ) {
		blk_queue_split(q, &bio);
		ret = pblk_submit_read(pblk, bio);
		/* The read path completed a cloned bio itself; drop our ref. */
		if (ret == NVM_IO_DONE && bio_flagged(bio, BIO_CLONED))
			bio_put(bio);

		return ret;
	}

	/* Prevent deadlock in the case of a modest LUN configuration and large
	 * user I/Os. Unless stalled, the rate limiter leaves at least 256KB
	 * available for user I/O.
	 */
	if (pblk_get_secs(bio) > pblk_rl_max_io(&pblk->rl))
		blk_queue_split(q, &bio);

	return pblk_write_to_cache(pblk, bio, PBLK_IOTYPE_USER);
}
76 | ||
/* make_request entry point: handle discards inline, then dispatch
 * reads/writes via pblk_rw_io() and complete the bio on its behalf.
 */
static blk_qc_t pblk_make_rq(struct request_queue *q, struct bio *bio)
{
	struct pblk *pblk = q->queuedata;

	if (bio_op(bio) == REQ_OP_DISCARD) {
		pblk_discard(pblk, bio);
		/* A discard carrying REQ_PREFLUSH falls through so the
		 * flush semantics are honored by the write path below.
		 */
		if (!(bio->bi_opf & REQ_PREFLUSH)) {
			bio_endio(bio);
			return BLK_QC_T_NONE;
		}
	}

	switch (pblk_rw_io(q, pblk, bio)) {
	case NVM_IO_ERR:
		bio_io_error(bio);
		break;
	case NVM_IO_DONE:
		bio_endio(bio);
		break;
	}

	return BLK_QC_T_NONE;
}
100 | ||
c5586192 HH |
101 | static size_t pblk_trans_map_size(struct pblk *pblk) |
102 | { | |
103 | int entry_size = 8; | |
104 | ||
bb845ae4 | 105 | if (pblk->addrf_len < 32) |
c5586192 HH |
106 | entry_size = 4; |
107 | ||
108 | return entry_size * pblk->rl.nr_secs; | |
109 | } | |
110 | ||
880eda54 | 111 | #ifdef CONFIG_NVM_PBLK_DEBUG |
c5586192 HH |
112 | static u32 pblk_l2p_crc(struct pblk *pblk) |
113 | { | |
114 | size_t map_size; | |
115 | u32 crc = ~(u32)0; | |
116 | ||
117 | map_size = pblk_trans_map_size(pblk); | |
118 | crc = crc32_le(crc, pblk->trans_map, map_size); | |
119 | return crc; | |
120 | } | |
121 | #endif | |
122 | ||
/* Release the vmalloc'ed L2P translation map. */
static void pblk_l2p_free(struct pblk *pblk)
{
	vfree(pblk->trans_map);
}
127 | ||
43d47127 JG |
/* Populate the L2P table: generate a fresh instance UUID on factory
 * init, otherwise recover the mapping from the device. Returns 0 on
 * success or -EFAULT when recovery/line setup fails.
 */
static int pblk_l2p_recover(struct pblk *pblk, bool factory_init)
{
	struct pblk_line *line = NULL;

	if (factory_init) {
		guid_gen(&pblk->instance_uuid);
	} else {
		line = pblk_recov_l2p(pblk);
		if (IS_ERR(line)) {
			pblk_err(pblk, "could not recover l2p table\n");
			return -EFAULT;
		}
	}

#ifdef CONFIG_NVM_PBLK_DEBUG
	pblk_info(pblk, "init: L2P CRC: %x\n", pblk_l2p_crc(pblk));
#endif

	/* Free full lines directly as GC has not been started yet */
	pblk_gc_free_full_lines(pblk);

	if (!line) {
		/* Configure next line for user data */
		line = pblk_line_get_first_data(pblk);
		if (!line)
			return -EFAULT;
	}

	return 0;
}
158 | ||
/* Allocate the L2P translation map, initialize every entry to the
 * empty PPA, then run recovery. The map is freed again on recovery
 * failure so the caller owns it only on success.
 */
static int pblk_l2p_init(struct pblk *pblk, bool factory_init)
{
	sector_t i;
	struct ppa_addr ppa;
	size_t map_size;
	int ret = 0;

	map_size = pblk_trans_map_size(pblk);
	pblk->trans_map = vmalloc(map_size);
	if (!pblk->trans_map)
		return -ENOMEM;

	pblk_ppa_set_empty(&ppa);

	for (i = 0; i < pblk->rl.nr_secs; i++)
		pblk_trans_map_set(pblk, i, ppa);

	ret = pblk_l2p_recover(pblk, factory_init);
	if (ret)
		vfree(pblk->trans_map);

	return ret;
}
182 | ||
/* Tear down the write buffer, warning if entries are still in flight. */
static void pblk_rwb_free(struct pblk *pblk)
{
	if (pblk_rb_tear_down_check(&pblk->rwb))
		pblk_err(pblk, "write buffer error on tear down\n");

	pblk_rb_free(&pblk->rwb);
}
190 | ||
/* Size and initialize the write ring buffer from the device geometry,
 * honoring the write_buffer_size module parameter only when it asks
 * for a larger buffer than the geometry-derived minimum.
 */
static int pblk_rwb_init(struct pblk *pblk)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	unsigned long buffer_size;
	int pgs_in_buffer, threshold;

	/* Flush threshold: one cache-unit worth of data per LUN. */
	threshold = geo->mw_cunits * geo->all_luns;
	pgs_in_buffer = (max(geo->mw_cunits, geo->ws_opt) + geo->ws_opt)
								* geo->all_luns;

	if (write_buffer_size && (write_buffer_size > pgs_in_buffer))
		buffer_size = write_buffer_size;
	else
		buffer_size = pgs_in_buffer;

	return pblk_rb_init(&pblk->rwb, buffer_size, threshold, geo->csecs);
}
209 | ||
4e495a46 MB |
/* Build the 1.2-spec PPA address format for this instance, packing
 * fields as sec|pln|ch|lun|pg|blk (LSB to MSB). Channel and LUN counts
 * must be powers of two so their fields pack without waste.
 * Returns the total address length in bits, or -EINVAL.
 */
static int pblk_set_addrf_12(struct pblk *pblk, struct nvm_geo *geo,
			     struct nvm_addrf_12 *dst)
{
	struct nvm_addrf_12 *src = (struct nvm_addrf_12 *)&geo->addrf;
	int power_len;

	/* Re-calculate channel and lun format to adapt to configuration */
	power_len = get_count_order(geo->num_ch);
	if (1 << power_len != geo->num_ch) {
		pblk_err(pblk, "supports only power-of-two channel config.\n");
		return -EINVAL;
	}
	dst->ch_len = power_len;

	power_len = get_count_order(geo->num_lun);
	if (1 << power_len != geo->num_lun) {
		pblk_err(pblk, "supports only power-of-two LUN config.\n");
		return -EINVAL;
	}
	dst->lun_len = power_len;

	/* Remaining field widths come straight from the device geometry. */
	dst->blk_len = src->blk_len;
	dst->pg_len = src->pg_len;
	dst->pln_len = src->pln_len;
	dst->sec_len = src->sec_len;

	/* Each field's offset is the end of the previous field. */
	dst->sec_offset = 0;
	dst->pln_offset = dst->sec_len;
	dst->ch_offset = dst->pln_offset + dst->pln_len;
	dst->lun_offset = dst->ch_offset + dst->ch_len;
	dst->pg_offset = dst->lun_offset + dst->lun_len;
	dst->blk_offset = dst->pg_offset + dst->pg_len;

	dst->sec_mask = ((1ULL << dst->sec_len) - 1) << dst->sec_offset;
	dst->pln_mask = ((1ULL << dst->pln_len) - 1) << dst->pln_offset;
	dst->ch_mask = ((1ULL << dst->ch_len) - 1) << dst->ch_offset;
	dst->lun_mask = ((1ULL << dst->lun_len) - 1) << dst->lun_offset;
	dst->pg_mask = ((1ULL << dst->pg_len) - 1) << dst->pg_offset;
	dst->blk_mask = ((1ULL << dst->blk_len) - 1) << dst->blk_offset;

	return dst->blk_offset + src->blk_len;
}
252 | ||
3b2a3ad1 JG |
/* Build the 2.0-spec address format (sec|ch|lun|chk) plus pblk's own
 * user-facing striping description. Returns the address length in bits.
 */
static int pblk_set_addrf_20(struct nvm_geo *geo, struct nvm_addrf *adst,
			     struct pblk_addrf *udst)
{
	struct nvm_addrf *src = &geo->addrf;

	adst->ch_len = get_count_order(geo->num_ch);
	adst->lun_len = get_count_order(geo->num_lun);
	adst->chk_len = src->chk_len;
	adst->sec_len = src->sec_len;

	adst->sec_offset = 0;
	adst->ch_offset = adst->sec_len;
	adst->lun_offset = adst->ch_offset + adst->ch_len;
	adst->chk_offset = adst->lun_offset + adst->lun_len;

	adst->sec_mask = ((1ULL << adst->sec_len) - 1) << adst->sec_offset;
	adst->chk_mask = ((1ULL << adst->chk_len) - 1) << adst->chk_offset;
	adst->lun_mask = ((1ULL << adst->lun_len) - 1) << adst->lun_offset;
	adst->ch_mask = ((1ULL << adst->ch_len) - 1) << adst->ch_offset;

	/* Striping order: sectors within a chunk, then channels, then LUNs. */
	udst->sec_stripe = geo->ws_opt;
	udst->ch_stripe = geo->num_ch;
	udst->lun_stripe = geo->num_lun;

	udst->sec_lun_stripe = udst->sec_stripe * udst->ch_stripe;
	udst->sec_ws_stripe = udst->sec_lun_stripe * udst->lun_stripe;

	return adst->chk_offset + adst->chk_len;
}
282 | ||
bb845ae4 | 283 | static int pblk_set_addrf(struct pblk *pblk) |
e46f4e48 JG |
284 | { |
285 | struct nvm_tgt_dev *dev = pblk->dev; | |
286 | struct nvm_geo *geo = &dev->geo; | |
287 | int mod; | |
288 | ||
3b2a3ad1 JG |
289 | switch (geo->version) { |
290 | case NVM_OCSSD_SPEC_12: | |
291 | div_u64_rem(geo->clba, pblk->min_write_pgs, &mod); | |
292 | if (mod) { | |
4e495a46 | 293 | pblk_err(pblk, "bad configuration of sectors/pages\n"); |
3b2a3ad1 JG |
294 | return -EINVAL; |
295 | } | |
296 | ||
4e495a46 MB |
297 | pblk->addrf_len = pblk_set_addrf_12(pblk, geo, |
298 | (void *)&pblk->addrf); | |
3b2a3ad1 JG |
299 | break; |
300 | case NVM_OCSSD_SPEC_20: | |
301 | pblk->addrf_len = pblk_set_addrf_20(geo, (void *)&pblk->addrf, | |
4e495a46 | 302 | &pblk->uaddrf); |
3b2a3ad1 JG |
303 | break; |
304 | default: | |
4e495a46 | 305 | pblk_err(pblk, "OCSSD revision not supported (%d)\n", |
3b2a3ad1 | 306 | geo->version); |
e46f4e48 JG |
307 | return -EINVAL; |
308 | } | |
309 | ||
a4bd217b JG |
310 | return 0; |
311 | } | |
312 | ||
1864de94 | 313 | static int pblk_create_global_caches(void) |
a4bd217b | 314 | { |
1864de94 HH |
315 | |
316 | pblk_caches.ws = kmem_cache_create("pblk_blk_ws", | |
a4bd217b | 317 | sizeof(struct pblk_line_ws), 0, 0, NULL); |
1864de94 | 318 | if (!pblk_caches.ws) |
a4bd217b | 319 | return -ENOMEM; |
a4bd217b | 320 | |
1864de94 | 321 | pblk_caches.rec = kmem_cache_create("pblk_rec", |
a4bd217b | 322 | sizeof(struct pblk_rec_ctx), 0, 0, NULL); |
1864de94 HH |
323 | if (!pblk_caches.rec) |
324 | goto fail_destroy_ws; | |
a4bd217b | 325 | |
1864de94 | 326 | pblk_caches.g_rq = kmem_cache_create("pblk_g_rq", pblk_g_rq_size, |
a4bd217b | 327 | 0, 0, NULL); |
1864de94 HH |
328 | if (!pblk_caches.g_rq) |
329 | goto fail_destroy_rec; | |
a4bd217b | 330 | |
1864de94 | 331 | pblk_caches.w_rq = kmem_cache_create("pblk_w_rq", pblk_w_rq_size, |
a4bd217b | 332 | 0, 0, NULL); |
1864de94 HH |
333 | if (!pblk_caches.w_rq) |
334 | goto fail_destroy_g_rq; | |
a4bd217b JG |
335 | |
336 | return 0; | |
1864de94 HH |
337 | |
338 | fail_destroy_g_rq: | |
339 | kmem_cache_destroy(pblk_caches.g_rq); | |
340 | fail_destroy_rec: | |
341 | kmem_cache_destroy(pblk_caches.rec); | |
342 | fail_destroy_ws: | |
343 | kmem_cache_destroy(pblk_caches.ws); | |
344 | ||
345 | return -ENOMEM; | |
346 | } | |
347 | ||
/* Take a reference on the shared caches, creating them on first use.
 * kref_get_unless_zero() distinguishes "caches already exist" from
 * "first instance"; the mutex serializes creation against teardown.
 */
static int pblk_get_global_caches(void)
{
	int ret = 0;

	mutex_lock(&pblk_caches.mutex);

	if (kref_get_unless_zero(&pblk_caches.kref))
		goto out;

	ret = pblk_create_global_caches();
	if (!ret)
		kref_init(&pblk_caches.kref);

out:
	mutex_unlock(&pblk_caches.mutex);
	return ret;
}
365 | ||
/* kref release callback: destroy all shared caches once the last
 * pblk instance drops its reference.
 */
static void pblk_destroy_global_caches(struct kref *ref)
{
	struct pblk_global_caches *c;

	c = container_of(ref, struct pblk_global_caches, kref);

	kmem_cache_destroy(c->ws);
	kmem_cache_destroy(c->rec);
	kmem_cache_destroy(c->g_rq);
	kmem_cache_destroy(c->w_rq);
}
377 | ||
1864de94 | 378 | static void pblk_put_global_caches(void) |
22a4e061 | 379 | { |
1864de94 HH |
380 | mutex_lock(&pblk_caches.mutex); |
381 | kref_put(&pblk_caches.kref, pblk_destroy_global_caches); | |
382 | mutex_unlock(&pblk_caches.mutex); | |
22a4e061 RP |
383 | } |
384 | ||
a4bd217b JG |
/* Initialize per-instance core state: write-amplification counters,
 * write-size limits, packed-metadata mode, mempools, workqueues and the
 * address format. On failure every resource acquired so far is unwound
 * via the goto chain below. Returns 0 or -ENOMEM/-EINVAL.
 */
static int pblk_core_init(struct pblk *pblk)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	int ret, max_write_ppas;

	atomic64_set(&pblk->user_wa, 0);
	atomic64_set(&pblk->pad_wa, 0);
	atomic64_set(&pblk->gc_wa, 0);
	pblk->user_rst_wa = 0;
	pblk->pad_rst_wa = 0;
	pblk->gc_rst_wa = 0;

	atomic64_set(&pblk->nr_flush, 0);
	pblk->nr_flush_rst = 0;

	pblk->min_write_pgs = geo->ws_opt;
	pblk->min_write_pgs_data = pblk->min_write_pgs;
	max_write_ppas = pblk->min_write_pgs * geo->all_luns;
	pblk->max_write_pgs = min_t(int, max_write_ppas, NVM_MAX_VLBA);
	/* Also cap by what the underlying queue can take in one request. */
	pblk->max_write_pgs = min_t(int, pblk->max_write_pgs,
		queue_max_hw_sectors(dev->q) / (geo->csecs >> SECTOR_SHIFT));
	pblk_set_sec_per_write(pblk, pblk->min_write_pgs);

	pblk->oob_meta_size = geo->sos;
	if (!pblk_is_oob_meta_supported(pblk)) {
		/* For drives which do not have the OOB metadata feature,
		 * in order to support recovery we need to use
		 * so called packed metadata. Packed metadata will store
		 * the same information as OOB metadata (l2p table mapping),
		 * but in the form of a single page at the end of
		 * every write request.
		 */
		if (pblk->min_write_pgs
			* sizeof(struct pblk_sec_meta) > PAGE_SIZE) {
			/* We want to keep all the packed metadata on single
			 * page per write requests. So we need to ensure that
			 * it will fit.
			 *
			 * This is more like sanity check, since there is
			 * no device with such a big minimal write size
			 * (above 1 megabyte).
			 */
			pblk_err(pblk, "Not supported min write size\n");
			return -EINVAL;
		}
		/* For packed meta approach we do some simplification.
		 * On read path we always issue requests which size
		 * equal to max_write_pgs, with all pages filled with
		 * user payload except of last one page which will be
		 * filled with packed metadata.
		 */
		pblk->max_write_pgs = pblk->min_write_pgs;
		pblk->min_write_pgs_data = pblk->min_write_pgs - 1;
	}

	pblk->pad_dist = kcalloc(pblk->min_write_pgs - 1, sizeof(atomic64_t),
								GFP_KERNEL);
	if (!pblk->pad_dist)
		return -ENOMEM;

	if (pblk_get_global_caches())
		goto fail_free_pad_dist;

	/* Internal bios can be at most the sectors signaled by the device. */
	ret = mempool_init_page_pool(&pblk->page_bio_pool, NVM_MAX_VLBA, 0);
	if (ret)
		goto free_global_caches;

	ret = mempool_init_slab_pool(&pblk->gen_ws_pool, PBLK_GEN_WS_POOL_SIZE,
				     pblk_caches.ws);
	if (ret)
		goto free_page_bio_pool;

	ret = mempool_init_slab_pool(&pblk->rec_pool, geo->all_luns,
				     pblk_caches.rec);
	if (ret)
		goto free_gen_ws_pool;

	ret = mempool_init_slab_pool(&pblk->r_rq_pool, geo->all_luns,
				     pblk_caches.g_rq);
	if (ret)
		goto free_rec_pool;

	ret = mempool_init_slab_pool(&pblk->e_rq_pool, geo->all_luns,
				     pblk_caches.g_rq);
	if (ret)
		goto free_r_rq_pool;

	ret = mempool_init_slab_pool(&pblk->w_rq_pool, geo->all_luns,
				     pblk_caches.w_rq);
	if (ret)
		goto free_e_rq_pool;

	pblk->close_wq = alloc_workqueue("pblk-close-wq",
			WQ_MEM_RECLAIM | WQ_UNBOUND, PBLK_NR_CLOSE_JOBS);
	if (!pblk->close_wq)
		goto free_w_rq_pool;

	pblk->bb_wq = alloc_workqueue("pblk-bb-wq",
			WQ_MEM_RECLAIM | WQ_UNBOUND, 0);
	if (!pblk->bb_wq)
		goto free_close_wq;

	pblk->r_end_wq = alloc_workqueue("pblk-read-end-wq",
			WQ_MEM_RECLAIM | WQ_UNBOUND, 0);
	if (!pblk->r_end_wq)
		goto free_bb_wq;

	if (pblk_set_addrf(pblk))
		goto free_r_end_wq;

	INIT_LIST_HEAD(&pblk->compl_list);
	INIT_LIST_HEAD(&pblk->resubmit_list);

	return 0;

free_r_end_wq:
	destroy_workqueue(pblk->r_end_wq);
free_bb_wq:
	destroy_workqueue(pblk->bb_wq);
free_close_wq:
	destroy_workqueue(pblk->close_wq);
free_w_rq_pool:
	mempool_exit(&pblk->w_rq_pool);
free_e_rq_pool:
	mempool_exit(&pblk->e_rq_pool);
free_r_rq_pool:
	mempool_exit(&pblk->r_rq_pool);
free_rec_pool:
	mempool_exit(&pblk->rec_pool);
free_gen_ws_pool:
	mempool_exit(&pblk->gen_ws_pool);
free_page_bio_pool:
	mempool_exit(&pblk->page_bio_pool);
free_global_caches:
	pblk_put_global_caches();
fail_free_pad_dist:
	kfree(pblk->pad_dist);
	return -ENOMEM;
}
526 | ||
/* Mirror of pblk_core_init(): tear down workqueues, mempools, the
 * shared-cache reference and the pad-distribution array. Workqueue
 * pointers are NULL-checked because this may run on a partly
 * initialized instance.
 */
static void pblk_core_free(struct pblk *pblk)
{
	if (pblk->close_wq)
		destroy_workqueue(pblk->close_wq);

	if (pblk->r_end_wq)
		destroy_workqueue(pblk->r_end_wq);

	if (pblk->bb_wq)
		destroy_workqueue(pblk->bb_wq);

	mempool_exit(&pblk->page_bio_pool);
	mempool_exit(&pblk->gen_ws_pool);
	mempool_exit(&pblk->rec_pool);
	mempool_exit(&pblk->r_rq_pool);
	mempool_exit(&pblk->e_rq_pool);
	mempool_exit(&pblk->w_rq_pool);

	pblk_put_global_caches();
	kfree(pblk->pad_dist);
}
548 | ||
e411b331 JG |
/* Free line-management metadata: bad-block templates, vsc list,
 * per-data-line smeta/emeta buffers and the bitmap pool/cache.
 */
static void pblk_line_mg_free(struct pblk *pblk)
{
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	int i;

	kfree(l_mg->bb_template);
	kfree(l_mg->bb_aux);
	kfree(l_mg->vsc_list);

	for (i = 0; i < PBLK_DATA_LINES; i++) {
		kfree(l_mg->sline_meta[i]);
		/* emeta buffers may be kmalloc'ed or vmalloc'ed. */
		pblk_mfree(l_mg->eline_meta[i]->buf, l_mg->emeta_alloc_type);
		kfree(l_mg->eline_meta[i]);
	}

	mempool_destroy(l_mg->bitmap_pool);
	kmem_cache_destroy(l_mg->bitmap_cache);
}
567 | ||
48b8d208 HH |
/* Free per-line metadata allocated by pblk_alloc_line_meta(). */
static void pblk_line_meta_free(struct pblk_line_mgmt *l_mg,
				struct pblk_line *line)
{
	struct pblk_w_err_gc *w_err_gc = line->w_err_gc;

	kfree(line->blk_bitmap);
	kfree(line->erase_bitmap);
	kfree(line->chks);

	pblk_mfree(w_err_gc->lba_list, l_mg->emeta_alloc_type);
	kfree(w_err_gc);
}
580 | ||
a4bd217b JG |
/* Free all lines, the line-management state and the LUN/line arrays. */
static void pblk_lines_free(struct pblk *pblk)
{
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	struct pblk_line *line;
	int i;

	for (i = 0; i < l_mg->nr_lines; i++) {
		line = &pblk->lines[i];

		pblk_line_free(line);
		pblk_line_meta_free(l_mg, line);
	}

	pblk_line_mg_free(pblk);

	kfree(pblk->luns);
	kfree(pblk->lines);
}
599 | ||
43d47127 | 600 | static int pblk_luns_init(struct pblk *pblk) |
a4bd217b JG |
601 | { |
602 | struct nvm_tgt_dev *dev = pblk->dev; | |
603 | struct nvm_geo *geo = &dev->geo; | |
604 | struct pblk_lun *rlun; | |
e411b331 | 605 | int i; |
a4bd217b JG |
606 | |
607 | /* TODO: Implement unbalanced LUN support */ | |
a40afad9 | 608 | if (geo->num_lun < 0) { |
4e495a46 | 609 | pblk_err(pblk, "unbalanced LUN config.\n"); |
a4bd217b JG |
610 | return -EINVAL; |
611 | } | |
612 | ||
fae7fae4 MB |
613 | pblk->luns = kcalloc(geo->all_luns, sizeof(struct pblk_lun), |
614 | GFP_KERNEL); | |
a4bd217b JG |
615 | if (!pblk->luns) |
616 | return -ENOMEM; | |
617 | ||
fae7fae4 | 618 | for (i = 0; i < geo->all_luns; i++) { |
a4bd217b | 619 | /* Stripe across channels */ |
a40afad9 JG |
620 | int ch = i % geo->num_ch; |
621 | int lun_raw = i / geo->num_ch; | |
622 | int lunid = lun_raw + ch * geo->num_lun; | |
a4bd217b JG |
623 | |
624 | rlun = &pblk->luns[i]; | |
43d47127 | 625 | rlun->bppa = dev->luns[lunid]; |
a4bd217b JG |
626 | |
627 | sema_init(&rlun->wr_sem, 1); | |
a4bd217b JG |
628 | } |
629 | ||
630 | return 0; | |
631 | } | |
632 | ||
a4bd217b | 633 | /* See comment over struct line_emeta definition */ |
dd2a4343 | 634 | static unsigned int calc_emeta_len(struct pblk *pblk) |
a4bd217b | 635 | { |
dd2a4343 JG |
636 | struct pblk_line_meta *lm = &pblk->lm; |
637 | struct pblk_line_mgmt *l_mg = &pblk->l_mg; | |
638 | struct nvm_tgt_dev *dev = pblk->dev; | |
639 | struct nvm_geo *geo = &dev->geo; | |
640 | ||
641 | /* Round to sector size so that lba_list starts on its own sector */ | |
642 | lm->emeta_sec[1] = DIV_ROUND_UP( | |
76758390 | 643 | sizeof(struct line_emeta) + lm->blk_bitmap_len + |
e46f4e48 JG |
644 | sizeof(struct wa_counters), geo->csecs); |
645 | lm->emeta_len[1] = lm->emeta_sec[1] * geo->csecs; | |
dd2a4343 JG |
646 | |
647 | /* Round to sector size so that vsc_list starts on its own sector */ | |
648 | lm->dsec_per_line = lm->sec_per_line - lm->emeta_sec[0]; | |
649 | lm->emeta_sec[2] = DIV_ROUND_UP(lm->dsec_per_line * sizeof(u64), | |
e46f4e48 JG |
650 | geo->csecs); |
651 | lm->emeta_len[2] = lm->emeta_sec[2] * geo->csecs; | |
dd2a4343 JG |
652 | |
653 | lm->emeta_sec[3] = DIV_ROUND_UP(l_mg->nr_lines * sizeof(u32), | |
e46f4e48 JG |
654 | geo->csecs); |
655 | lm->emeta_len[3] = lm->emeta_sec[3] * geo->csecs; | |
dd2a4343 JG |
656 | |
657 | lm->vsc_list_len = l_mg->nr_lines * sizeof(u32); | |
658 | ||
659 | return (lm->emeta_len[1] + lm->emeta_len[2] + lm->emeta_len[3]); | |
a4bd217b JG |
660 | } |
661 | ||
3bcebc5b | 662 | static int pblk_set_provision(struct pblk *pblk, int nr_free_chks) |
a4bd217b JG |
663 | { |
664 | struct nvm_tgt_dev *dev = pblk->dev; | |
a7689938 JG |
665 | struct pblk_line_mgmt *l_mg = &pblk->l_mg; |
666 | struct pblk_line_meta *lm = &pblk->lm; | |
a4bd217b JG |
667 | struct nvm_geo *geo = &dev->geo; |
668 | sector_t provisioned; | |
55d8ec35 | 669 | int sec_meta, blk_meta, clba; |
3bcebc5b | 670 | int minimum; |
a4bd217b | 671 | |
e5392739 JG |
672 | if (geo->op == NVM_TARGET_DEFAULT_OP) |
673 | pblk->op = PBLK_DEFAULT_OP; | |
674 | else | |
675 | pblk->op = geo->op; | |
a4bd217b | 676 | |
3bcebc5b HH |
677 | minimum = pblk_get_min_chks(pblk); |
678 | provisioned = nr_free_chks; | |
a7689938 | 679 | provisioned *= (100 - pblk->op); |
a4bd217b JG |
680 | sector_div(provisioned, 100); |
681 | ||
3bcebc5b HH |
682 | if ((nr_free_chks - provisioned) < minimum) { |
683 | if (geo->op != NVM_TARGET_DEFAULT_OP) { | |
684 | pblk_err(pblk, "OP too small to create a sane instance\n"); | |
685 | return -EINTR; | |
686 | } | |
687 | ||
688 | /* If the user did not specify an OP value, and PBLK_DEFAULT_OP | |
689 | * is not enough, calculate and set sane value | |
690 | */ | |
691 | ||
692 | provisioned = nr_free_chks - minimum; | |
693 | pblk->op = (100 * minimum) / nr_free_chks; | |
694 | pblk_info(pblk, "Default OP insufficient, adjusting OP to %d\n", | |
695 | pblk->op); | |
696 | } | |
697 | ||
698 | pblk->op_blks = nr_free_chks - provisioned; | |
a7689938 | 699 | |
a4bd217b JG |
700 | /* Internally pblk manages all free blocks, but all calculations based |
701 | * on user capacity consider only provisioned blocks | |
702 | */ | |
3bcebc5b HH |
703 | pblk->rl.total_blocks = nr_free_chks; |
704 | pblk->rl.nr_secs = nr_free_chks * geo->clba; | |
a7689938 JG |
705 | |
706 | /* Consider sectors used for metadata */ | |
707 | sec_meta = (lm->smeta_sec + lm->emeta_sec[0]) * l_mg->nr_free_lines; | |
e46f4e48 | 708 | blk_meta = DIV_ROUND_UP(sec_meta, geo->clba); |
a7689938 | 709 | |
55d8ec35 IK |
710 | clba = (geo->clba / pblk->min_write_pgs) * pblk->min_write_pgs_data; |
711 | pblk->capacity = (provisioned - blk_meta) * clba; | |
a7689938 | 712 | |
3bcebc5b HH |
713 | atomic_set(&pblk->rl.free_blocks, nr_free_chks); |
714 | atomic_set(&pblk->rl.free_user_blocks, nr_free_chks); | |
715 | ||
716 | return 0; | |
a4bd217b JG |
717 | } |
718 | ||
aff3fb18 | 719 | static int pblk_setup_line_meta_chk(struct pblk *pblk, struct pblk_line *line, |
32ef9412 JG |
720 | struct nvm_chk_meta *meta) |
721 | { | |
722 | struct nvm_tgt_dev *dev = pblk->dev; | |
723 | struct nvm_geo *geo = &dev->geo; | |
724 | struct pblk_line_meta *lm = &pblk->lm; | |
725 | int i, nr_bad_chks = 0; | |
726 | ||
727 | for (i = 0; i < lm->blk_per_line; i++) { | |
728 | struct pblk_lun *rlun = &pblk->luns[i]; | |
729 | struct nvm_chk_meta *chunk; | |
730 | struct nvm_chk_meta *chunk_meta; | |
731 | struct ppa_addr ppa; | |
732 | int pos; | |
733 | ||
734 | ppa = rlun->bppa; | |
735 | pos = pblk_ppa_to_pos(geo, ppa); | |
736 | chunk = &line->chks[pos]; | |
737 | ||
738 | ppa.m.chk = line->id; | |
739 | chunk_meta = pblk_chunk_get_off(pblk, meta, ppa); | |
740 | ||
741 | chunk->state = chunk_meta->state; | |
742 | chunk->type = chunk_meta->type; | |
743 | chunk->wi = chunk_meta->wi; | |
744 | chunk->slba = chunk_meta->slba; | |
745 | chunk->cnlb = chunk_meta->cnlb; | |
746 | chunk->wp = chunk_meta->wp; | |
747 | ||
4c44abf4 HH |
748 | trace_pblk_chunk_state(pblk_disk_name(pblk), &ppa, |
749 | chunk->state); | |
750 | ||
32ef9412 JG |
751 | if (chunk->type & NVM_CHK_TP_SZ_SPEC) { |
752 | WARN_ONCE(1, "pblk: custom-sized chunks unsupported\n"); | |
753 | continue; | |
754 | } | |
755 | ||
6f9c9607 JG |
756 | if (!(chunk->state & NVM_CHK_ST_OFFLINE)) |
757 | continue; | |
758 | ||
32ef9412 JG |
759 | set_bit(pos, line->blk_bitmap); |
760 | nr_bad_chks++; | |
761 | } | |
762 | ||
763 | return nr_bad_chks; | |
764 | } | |
765 | ||
/* Initialize a line's metadata from the chunk report and classify it:
 * lines with too few good chunks go to the bad list, others to the
 * free list. Returns the number of usable chunks (0 for a bad line).
 */
static long pblk_setup_line_meta(struct pblk *pblk, struct pblk_line *line,
				 void *chunk_meta, int line_id)
{
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	struct pblk_line_meta *lm = &pblk->lm;
	long nr_bad_chks, chk_in_line;

	line->pblk = pblk;
	line->id = line_id;
	line->type = PBLK_LINETYPE_FREE;
	line->state = PBLK_LINESTATE_NEW;
	line->gc_group = PBLK_LINEGC_NONE;
	line->vsc = &l_mg->vsc_list[line_id];
	spin_lock_init(&line->lock);

	nr_bad_chks = pblk_setup_line_meta_chk(pblk, line, chunk_meta);

	chk_in_line = lm->blk_per_line - nr_bad_chks;
	if (nr_bad_chks < 0 || nr_bad_chks > lm->blk_per_line ||
					chk_in_line < lm->min_blk_line) {
		line->state = PBLK_LINESTATE_BAD;
		list_add_tail(&line->list, &l_mg->bad_list);
		return 0;
	}

	atomic_set(&line->blk_in_line, chk_in_line);
	list_add_tail(&line->list, &l_mg->free_list);
	l_mg->nr_free_lines++;

	return chk_in_line;
}
797 | ||
/* Allocate per-line metadata (block/erase bitmaps, chunk array and the
 * write-error GC context); unwinds on failure. Returns 0 or -ENOMEM.
 */
static int pblk_alloc_line_meta(struct pblk *pblk, struct pblk_line *line)
{
	struct pblk_line_meta *lm = &pblk->lm;

	line->blk_bitmap = kzalloc(lm->blk_bitmap_len, GFP_KERNEL);
	if (!line->blk_bitmap)
		return -ENOMEM;

	line->erase_bitmap = kzalloc(lm->blk_bitmap_len, GFP_KERNEL);
	if (!line->erase_bitmap)
		goto free_blk_bitmap;


	line->chks = kmalloc_array(lm->blk_per_line,
				   sizeof(struct nvm_chk_meta), GFP_KERNEL);
	if (!line->chks)
		goto free_erase_bitmap;

	line->w_err_gc = kzalloc(sizeof(struct pblk_w_err_gc), GFP_KERNEL);
	if (!line->w_err_gc)
		goto free_chks;

	return 0;

free_chks:
	kfree(line->chks);
free_erase_bitmap:
	kfree(line->erase_bitmap);
free_blk_bitmap:
	kfree(line->blk_bitmap);
	return -ENOMEM;
}
830 | ||
/* Initialize the line management structure: line lists, GC queues, locks,
 * the valid-sector-count list, bad-block templates, and the per-instance
 * smeta/emeta buffers and bitmap mempool. Returns 0 on success or -ENOMEM,
 * unwinding all allocations made so far.
 */
static int pblk_line_mg_init(struct pblk *pblk)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	struct pblk_line_meta *lm = &pblk->lm;
	int i, bb_distance;

	l_mg->nr_lines = geo->num_chk;
	l_mg->log_line = l_mg->data_line = NULL;
	l_mg->l_seq_nr = l_mg->d_seq_nr = 0;
	l_mg->nr_free_lines = 0;
	bitmap_zero(&l_mg->meta_bitmap, PBLK_DATA_LINES);

	INIT_LIST_HEAD(&l_mg->free_list);
	INIT_LIST_HEAD(&l_mg->corrupt_list);
	INIT_LIST_HEAD(&l_mg->bad_list);
	INIT_LIST_HEAD(&l_mg->gc_full_list);
	INIT_LIST_HEAD(&l_mg->gc_high_list);
	INIT_LIST_HEAD(&l_mg->gc_mid_list);
	INIT_LIST_HEAD(&l_mg->gc_low_list);
	INIT_LIST_HEAD(&l_mg->gc_empty_list);
	INIT_LIST_HEAD(&l_mg->gc_werr_list);

	INIT_LIST_HEAD(&l_mg->emeta_list);

	/* GC scan order: write-error lines first, then by valid-data level */
	l_mg->gc_lists[0] = &l_mg->gc_werr_list;
	l_mg->gc_lists[1] = &l_mg->gc_high_list;
	l_mg->gc_lists[2] = &l_mg->gc_mid_list;
	l_mg->gc_lists[3] = &l_mg->gc_low_list;

	spin_lock_init(&l_mg->free_lock);
	spin_lock_init(&l_mg->close_lock);
	spin_lock_init(&l_mg->gc_lock);

	l_mg->vsc_list = kcalloc(l_mg->nr_lines, sizeof(__le32), GFP_KERNEL);
	if (!l_mg->vsc_list)
		goto fail;

	l_mg->bb_template = kzalloc(lm->sec_bitmap_len, GFP_KERNEL);
	if (!l_mg->bb_template)
		goto fail_free_vsc_list;

	l_mg->bb_aux = kzalloc(lm->sec_bitmap_len, GFP_KERNEL);
	if (!l_mg->bb_aux)
		goto fail_free_bb_template;

	/* smeta is always small enough to fit on a kmalloc memory allocation,
	 * emeta depends on the number of LUNs allocated to the pblk instance
	 */
	for (i = 0; i < PBLK_DATA_LINES; i++) {
		l_mg->sline_meta[i] = kmalloc(lm->smeta_len, GFP_KERNEL);
		if (!l_mg->sline_meta[i])
			goto fail_free_smeta;
	}

	l_mg->bitmap_cache = kmem_cache_create("pblk_lm_bitmap",
			lm->sec_bitmap_len, 0, 0, NULL);
	if (!l_mg->bitmap_cache)
		goto fail_free_smeta;

	/* the bitmap pool is used for both valid and map bitmaps */
	l_mg->bitmap_pool = mempool_create_slab_pool(PBLK_DATA_LINES * 2,
				l_mg->bitmap_cache);
	if (!l_mg->bitmap_pool)
		goto fail_destroy_bitmap_cache;

	/* emeta allocates three different buffers for managing metadata with
	 * in-memory and in-media layouts
	 */
	for (i = 0; i < PBLK_DATA_LINES; i++) {
		struct pblk_emeta *emeta;

		emeta = kmalloc(sizeof(struct pblk_emeta), GFP_KERNEL);
		if (!emeta)
			goto fail_free_emeta;

		/* emeta_len[0] is constant per instance, so alloc_type is the
		 * same for every iteration of this loop
		 */
		if (lm->emeta_len[0] > KMALLOC_MAX_CACHE_SIZE) {
			l_mg->emeta_alloc_type = PBLK_VMALLOC_META;

			emeta->buf = vmalloc(lm->emeta_len[0]);
			if (!emeta->buf) {
				kfree(emeta);
				goto fail_free_emeta;
			}

			emeta->nr_entries = lm->emeta_sec[0];
			l_mg->eline_meta[i] = emeta;
		} else {
			l_mg->emeta_alloc_type = PBLK_KMALLOC_META;

			emeta->buf = kmalloc(lm->emeta_len[0], GFP_KERNEL);
			if (!emeta->buf) {
				kfree(emeta);
				goto fail_free_emeta;
			}

			emeta->nr_entries = lm->emeta_sec[0];
			l_mg->eline_meta[i] = emeta;
		}
	}

	for (i = 0; i < l_mg->nr_lines; i++)
		l_mg->vsc_list[i] = cpu_to_le32(EMPTY_ENTRY);

	/* pre-mark the smeta/emeta sector positions across all LUNs */
	bb_distance = (geo->all_luns) * geo->ws_opt;
	for (i = 0; i < lm->sec_per_line; i += bb_distance)
		bitmap_set(l_mg->bb_template, i, geo->ws_opt);

	return 0;

fail_free_emeta:
	/* free buf according to the alloc_type chosen above */
	while (--i >= 0) {
		if (l_mg->emeta_alloc_type == PBLK_VMALLOC_META)
			vfree(l_mg->eline_meta[i]->buf);
		else
			kfree(l_mg->eline_meta[i]->buf);
		kfree(l_mg->eline_meta[i]);
	}

	mempool_destroy(l_mg->bitmap_pool);
fail_destroy_bitmap_cache:
	kmem_cache_destroy(l_mg->bitmap_cache);
fail_free_smeta:
	/* safe for partially-filled sline_meta: pblk is kzalloc'd, so
	 * unallocated slots are NULL and kfree(NULL) is a no-op
	 */
	for (i = 0; i < PBLK_DATA_LINES; i++)
		kfree(l_mg->sline_meta[i]);
	kfree(l_mg->bb_aux);
fail_free_bb_template:
	kfree(l_mg->bb_template);
fail_free_vsc_list:
	kfree(l_mg->vsc_list);
fail:
	return -ENOMEM;
}
965 | ||
43d47127 | 966 | static int pblk_line_meta_init(struct pblk *pblk) |
a4bd217b JG |
967 | { |
968 | struct nvm_tgt_dev *dev = pblk->dev; | |
969 | struct nvm_geo *geo = &dev->geo; | |
a4bd217b | 970 | struct pblk_line_meta *lm = &pblk->lm; |
a4bd217b | 971 | unsigned int smeta_len, emeta_len; |
43d47127 | 972 | int i; |
a4bd217b | 973 | |
e46f4e48 | 974 | lm->sec_per_line = geo->clba * geo->all_luns; |
fae7fae4 MB |
975 | lm->blk_per_line = geo->all_luns; |
976 | lm->blk_bitmap_len = BITS_TO_LONGS(geo->all_luns) * sizeof(long); | |
a4bd217b | 977 | lm->sec_bitmap_len = BITS_TO_LONGS(lm->sec_per_line) * sizeof(long); |
fae7fae4 | 978 | lm->lun_bitmap_len = BITS_TO_LONGS(geo->all_luns) * sizeof(long); |
27b97872 RP |
979 | lm->mid_thrs = lm->sec_per_line / 2; |
980 | lm->high_thrs = lm->sec_per_line / 4; | |
fae7fae4 | 981 | lm->meta_distance = (geo->all_luns / 2) * pblk->min_write_pgs; |
a4bd217b JG |
982 | |
983 | /* Calculate necessary pages for smeta. See comment over struct | |
984 | * line_smeta definition | |
985 | */ | |
a4bd217b JG |
986 | i = 1; |
987 | add_smeta_page: | |
e46f4e48 JG |
988 | lm->smeta_sec = i * geo->ws_opt; |
989 | lm->smeta_len = lm->smeta_sec * geo->csecs; | |
a4bd217b | 990 | |
dd2a4343 | 991 | smeta_len = sizeof(struct line_smeta) + lm->lun_bitmap_len; |
a4bd217b JG |
992 | if (smeta_len > lm->smeta_len) { |
993 | i++; | |
994 | goto add_smeta_page; | |
995 | } | |
996 | ||
997 | /* Calculate necessary pages for emeta. See comment over struct | |
998 | * line_emeta definition | |
999 | */ | |
1000 | i = 1; | |
1001 | add_emeta_page: | |
e46f4e48 JG |
1002 | lm->emeta_sec[0] = i * geo->ws_opt; |
1003 | lm->emeta_len[0] = lm->emeta_sec[0] * geo->csecs; | |
a4bd217b | 1004 | |
dd2a4343 JG |
1005 | emeta_len = calc_emeta_len(pblk); |
1006 | if (emeta_len > lm->emeta_len[0]) { | |
a4bd217b JG |
1007 | i++; |
1008 | goto add_emeta_page; | |
1009 | } | |
a4bd217b | 1010 | |
fae7fae4 | 1011 | lm->emeta_bb = geo->all_luns > i ? geo->all_luns - i : 0; |
21d22871 JG |
1012 | |
1013 | lm->min_blk_line = 1; | |
fae7fae4 | 1014 | if (geo->all_luns > 1) |
21d22871 | 1015 | lm->min_blk_line += DIV_ROUND_UP(lm->smeta_sec + |
e46f4e48 | 1016 | lm->emeta_sec[0], geo->clba); |
21d22871 | 1017 | |
b5e063a2 | 1018 | if (lm->min_blk_line > lm->blk_per_line) { |
4e495a46 | 1019 | pblk_err(pblk, "config. not supported. Min. LUN in line:%d\n", |
b5e063a2 | 1020 | lm->blk_per_line); |
e411b331 | 1021 | return -EINVAL; |
b5e063a2 | 1022 | } |
a4bd217b | 1023 | |
43d47127 JG |
1024 | return 0; |
1025 | } | |
1026 | ||
/* Top-level line initialization: compute line geometry, set up line
 * management state, LUNs, fetch chunk metadata from the device, allocate
 * and populate the per-line array, and derive provisioning from the number
 * of free chunks found. chunk_meta is only needed during setup and is
 * freed on every exit path. Returns 0 or a negative errno.
 */
static int pblk_lines_init(struct pblk *pblk)
{
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	struct pblk_line *line;
	void *chunk_meta;
	int nr_free_chks = 0;
	int i, ret;

	ret = pblk_line_meta_init(pblk);
	if (ret)
		return ret;

	ret = pblk_line_mg_init(pblk);
	if (ret)
		return ret;

	ret = pblk_luns_init(pblk);
	if (ret)
		goto fail_free_meta;

	chunk_meta = pblk_get_chunk_meta(pblk);
	if (IS_ERR(chunk_meta)) {
		ret = PTR_ERR(chunk_meta);
		goto fail_free_luns;
	}

	pblk->lines = kcalloc(l_mg->nr_lines, sizeof(struct pblk_line),
								GFP_KERNEL);
	if (!pblk->lines) {
		ret = -ENOMEM;
		goto fail_free_chunk_meta;
	}

	for (i = 0; i < l_mg->nr_lines; i++) {
		line = &pblk->lines[i];

		ret = pblk_alloc_line_meta(pblk, line);
		if (ret)
			goto fail_free_lines;

		/* setup returns the number of usable (free) chunks in line i */
		nr_free_chks += pblk_setup_line_meta(pblk, line, chunk_meta, i);

		trace_pblk_line_state(pblk_disk_name(pblk), line->id,
								line->state);
	}

	/* no free chunk at all means the media is unusable for this target */
	if (!nr_free_chks) {
		pblk_err(pblk, "too many bad blocks prevent for sane instance\n");
		ret = -EINTR;
		goto fail_free_lines;
	}

	ret = pblk_set_provision(pblk, nr_free_chks);
	if (ret)
		goto fail_free_lines;

	vfree(chunk_meta);
	return 0;

fail_free_lines:
	/* frees metadata of lines [0, i); i == nr_lines when the loop ran
	 * to completion and a later step failed
	 */
	while (--i >= 0)
		pblk_line_meta_free(l_mg, &pblk->lines[i]);
	kfree(pblk->lines);
fail_free_chunk_meta:
	vfree(chunk_meta);
fail_free_luns:
	kfree(pblk->luns);
fail_free_meta:
	pblk_line_mg_free(pblk);

	return ret;
}
1099 | ||
1100 | static int pblk_writer_init(struct pblk *pblk) | |
1101 | { | |
a4bd217b JG |
1102 | pblk->writer_ts = kthread_create(pblk_write_ts, pblk, "pblk-writer-t"); |
1103 | if (IS_ERR(pblk->writer_ts)) { | |
cc4f5ba1 JG |
1104 | int err = PTR_ERR(pblk->writer_ts); |
1105 | ||
1106 | if (err != -EINTR) | |
4e495a46 | 1107 | pblk_err(pblk, "could not allocate writer kthread (%d)\n", |
cc4f5ba1 JG |
1108 | err); |
1109 | return err; | |
a4bd217b JG |
1110 | } |
1111 | ||
cc4f5ba1 JG |
1112 | timer_setup(&pblk->wtimer, pblk_write_timer_fn, 0); |
1113 | mod_timer(&pblk->wtimer, jiffies + msecs_to_jiffies(100)); | |
1114 | ||
a4bd217b JG |
1115 | return 0; |
1116 | } | |
1117 | ||
/* Stop the write thread and its timer. Warns (but proceeds) if the write
 * buffer still holds unpersisted or unsynced entries.
 */
static void pblk_writer_stop(struct pblk *pblk)
{
	/* The pipeline must be stopped and the write buffer emptied before the
	 * write thread is stopped
	 */
	WARN(pblk_rb_read_count(&pblk->rwb),
			"Stopping not fully persisted write buffer\n");

	WARN(pblk_rb_sync_count(&pblk->rwb),
			"Stopping not fully synced write buffer\n");

	/* kill the timer first so it cannot re-trigger writer work */
	del_timer_sync(&pblk->wtimer);
	/* writer_ts may hold an ERR_PTR/NULL if init failed — only stop a
	 * thread that was actually created
	 */
	if (pblk->writer_ts)
		kthread_stop(pblk->writer_ts);
}
1133 | ||
/* Release all pblk subsystems in reverse order of initialization, then the
 * pblk instance itself. Caller must have torn down the pipeline first.
 */
static void pblk_free(struct pblk *pblk)
{
	pblk_lines_free(pblk);
	pblk_l2p_free(pblk);
	pblk_rwb_free(pblk);
	pblk_core_free(pblk);

	kfree(pblk);
}
1143 | ||
/* Tear down the write pipeline. When @graceful, flush outstanding data to
 * media first; otherwise just stop. Order matters: pipeline stop before
 * writer stop, then L2P sync and rate-limiter teardown.
 */
static void pblk_tear_down(struct pblk *pblk, bool graceful)
{
	if (graceful)
		__pblk_pipeline_flush(pblk);
	__pblk_pipeline_stop(pblk);
	pblk_writer_stop(pblk);
	pblk_rb_sync_l2p(&pblk->rwb);
	pblk_rl_free(&pblk->rl);

	pblk_debug(pblk, "consistent tear down (graceful:%d)\n", graceful);
}
1155 | ||
/* Target exit hook (nvm_tgt_type.exit): stop GC, tear down the pipeline
 * and free the instance. @private is the pblk pointer returned by
 * pblk_init().
 */
static void pblk_exit(void *private, bool graceful)
{
	struct pblk *pblk = private;

	pblk_gc_exit(pblk, graceful);
	pblk_tear_down(pblk, graceful);

#ifdef CONFIG_NVM_PBLK_DEBUG
	/* L2P CRC lets a recovery run verify the mapping table was intact */
	pblk_info(pblk, "exit: L2P CRC: %x\n", pblk_l2p_crc(pblk));
#endif

	pblk_free(pblk);
}
1169 | ||
1170 | static sector_t pblk_capacity(void *private) | |
1171 | { | |
1172 | struct pblk *pblk = private; | |
1173 | ||
1174 | return pblk->capacity * NR_PHY_IN_LOG; | |
1175 | } | |
1176 | ||
/* Target init hook (nvm_tgt_type.init): allocate and bring up a pblk
 * instance on top of @dev, exporting it through @tdisk. Returns the pblk
 * pointer (passed back to the other hooks as @private) or an ERR_PTR.
 * Subsystems are initialized in dependency order and unwound in reverse
 * on failure.
 */
static void *pblk_init(struct nvm_tgt_dev *dev, struct gendisk *tdisk,
		       int flags)
{
	struct nvm_geo *geo = &dev->geo;
	struct request_queue *bqueue = dev->q;
	struct request_queue *tqueue = tdisk->queue;
	struct pblk *pblk;
	int ret;

	pblk = kzalloc(sizeof(struct pblk), GFP_KERNEL);
	if (!pblk)
		return ERR_PTR(-ENOMEM);

	pblk->dev = dev;
	pblk->disk = tdisk;
	pblk->state = PBLK_STATE_RUNNING;
	trace_pblk_state(pblk_disk_name(pblk), pblk->state);
	pblk->gc.gc_enabled = 0;

	/* only OCSSD spec 1.2 and 2.0 devices are supported */
	if (!(geo->version == NVM_OCSSD_SPEC_12 ||
					geo->version == NVM_OCSSD_SPEC_20)) {
		pblk_err(pblk, "OCSSD version not supported (%u)\n",
							geo->version);
		kfree(pblk);
		return ERR_PTR(-EINVAL);
	}

	if (geo->ext) {
		pblk_err(pblk, "extended metadata not supported\n");
		kfree(pblk);
		return ERR_PTR(-EINVAL);
	}

	spin_lock_init(&pblk->resubmit_lock);
	spin_lock_init(&pblk->trans_lock);
	spin_lock_init(&pblk->lock);

#ifdef CONFIG_NVM_PBLK_DEBUG
	atomic_long_set(&pblk->inflight_writes, 0);
	atomic_long_set(&pblk->padded_writes, 0);
	atomic_long_set(&pblk->padded_wb, 0);
	atomic_long_set(&pblk->req_writes, 0);
	atomic_long_set(&pblk->sub_writes, 0);
	atomic_long_set(&pblk->sync_writes, 0);
	atomic_long_set(&pblk->inflight_reads, 0);
	atomic_long_set(&pblk->cache_reads, 0);
	atomic_long_set(&pblk->sync_reads, 0);
	atomic_long_set(&pblk->recov_writes, 0);
	/* NOTE(review): duplicated line below — second set of recov_writes
	 * looks like it was meant for a different recovery counter; harmless,
	 * but verify against struct pblk
	 */
	atomic_long_set(&pblk->recov_writes, 0);
	atomic_long_set(&pblk->recov_gc_writes, 0);
	atomic_long_set(&pblk->recov_gc_reads, 0);
#endif

	atomic_long_set(&pblk->read_failed, 0);
	atomic_long_set(&pblk->read_empty, 0);
	atomic_long_set(&pblk->read_high_ecc, 0);
	atomic_long_set(&pblk->read_failed_gc, 0);
	atomic_long_set(&pblk->write_failed, 0);
	atomic_long_set(&pblk->erase_failed, 0);

	ret = pblk_core_init(pblk);
	if (ret) {
		pblk_err(pblk, "could not initialize core\n");
		goto fail;
	}

	ret = pblk_lines_init(pblk);
	if (ret) {
		pblk_err(pblk, "could not initialize lines\n");
		goto fail_free_core;
	}

	ret = pblk_rwb_init(pblk);
	if (ret) {
		pblk_err(pblk, "could not initialize write buffer\n");
		goto fail_free_lines;
	}

	ret = pblk_l2p_init(pblk, flags & NVM_TARGET_FACTORY);
	if (ret) {
		pblk_err(pblk, "could not initialize maps\n");
		goto fail_free_rwb;
	}

	ret = pblk_writer_init(pblk);
	if (ret) {
		/* -EINTR is a benign interruption; don't log it as an error */
		if (ret != -EINTR)
			pblk_err(pblk, "could not initialize write thread\n");
		goto fail_free_l2p;
	}

	ret = pblk_gc_init(pblk);
	if (ret) {
		pblk_err(pblk, "could not initialize gc\n");
		goto fail_stop_writer;
	}

	/* inherit the size from the underlying device */
	blk_queue_logical_block_size(tqueue, queue_physical_block_size(bqueue));
	blk_queue_max_hw_sectors(tqueue, queue_max_hw_sectors(bqueue));

	blk_queue_write_cache(tqueue, true, false);

	/* discard granularity is one full chunk */
	tqueue->limits.discard_granularity = geo->clba * geo->csecs;
	tqueue->limits.discard_alignment = 0;
	blk_queue_max_discard_sectors(tqueue, UINT_MAX >> 9);
	blk_queue_flag_set(QUEUE_FLAG_DISCARD, tqueue);

	pblk_info(pblk, "luns:%u, lines:%d, secs:%llu, buf entries:%u\n",
			geo->all_luns, pblk->l_mg.nr_lines,
			(unsigned long long)pblk->rl.nr_secs,
			pblk->rwb.nr_entries);

	/* writer thread was created stopped; start it now */
	wake_up_process(pblk->writer_ts);

	/* Check if we need to start GC */
	pblk_gc_should_kick(pblk);

	return pblk;

fail_stop_writer:
	pblk_writer_stop(pblk);
fail_free_l2p:
	pblk_l2p_free(pblk);
fail_free_rwb:
	pblk_rwb_free(pblk);
fail_free_lines:
	pblk_lines_free(pblk);
fail_free_core:
	pblk_core_free(pblk);
fail:
	kfree(pblk);
	return ERR_PTR(ret);
}
1311 | ||
/* physical block device target */
static struct nvm_tgt_type tt_pblk = {
	.name		= "pblk",
	.version	= {1, 0, 0},

	.make_rq	= pblk_make_rq,		/* bio entry point */
	.capacity	= pblk_capacity,	/* exported size in sectors */

	.init		= pblk_init,		/* instance create */
	.exit		= pblk_exit,		/* instance destroy */

	.sysfs_init	= pblk_sysfs_init,
	.sysfs_exit	= pblk_sysfs_exit,
	.owner		= THIS_MODULE,
};
1327 | ||
1328 | static int __init pblk_module_init(void) | |
1329 | { | |
b25d5237 N |
1330 | int ret; |
1331 | ||
b906bbb6 KO |
1332 | ret = bioset_init(&pblk_bio_set, BIO_POOL_SIZE, 0, 0); |
1333 | if (ret) | |
1334 | return ret; | |
b25d5237 N |
1335 | ret = nvm_register_tgt_type(&tt_pblk); |
1336 | if (ret) | |
b906bbb6 | 1337 | bioset_exit(&pblk_bio_set); |
b25d5237 | 1338 | return ret; |
a4bd217b JG |
1339 | } |
1340 | ||
/* Module exit: release the shared bio set and unregister the target type
 * (mirrors pblk_module_init()).
 */
static void pblk_module_exit(void)
{
	bioset_exit(&pblk_bio_set);
	nvm_unregister_tgt_type(&tt_pblk);
}
1346 | ||
/* Standard module registration and metadata */
module_init(pblk_module_init);
module_exit(pblk_module_exit);
MODULE_AUTHOR("Javier Gonzalez <javier@cnexlabs.com>");
MODULE_AUTHOR("Matias Bjorling <matias@cnexlabs.com>");
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Physical Block-Device for Open-Channel SSDs");