// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2015 IT University of Copenhagen (rrpc.c)
 * Copyright (C) 2016 CNEX Labs
 * Initial release: Javier Gonzalez <javier@cnexlabs.com>
 *                  Matias Bjorling <matias@cnexlabs.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * Implementation of a physical block-device target for Open-channel SSDs.
 *
 * pblk-init.c - pblk's initialization.
 */

#include "pblk.h"
#include "pblk-trace.h"

static unsigned int write_buffer_size;

module_param(write_buffer_size, uint, 0644);
MODULE_PARM_DESC(write_buffer_size, "number of entries in a write buffer");

struct pblk_global_caches {
        struct kmem_cache       *ws;
        struct kmem_cache       *rec;
        struct kmem_cache       *g_rq;
        struct kmem_cache       *w_rq;

        struct kref             kref;

        struct mutex            mutex; /* Ensures consistency between
                                        * caches and kref
                                        */
};

static struct pblk_global_caches pblk_caches = {
        .mutex = __MUTEX_INITIALIZER(pblk_caches.mutex),
        .kref = KREF_INIT(0),
};

struct bio_set pblk_bio_set;

static blk_qc_t pblk_make_rq(struct request_queue *q, struct bio *bio)
{
        struct pblk *pblk = q->queuedata;

        if (bio_op(bio) == REQ_OP_DISCARD) {
                pblk_discard(pblk, bio);
                if (!(bio->bi_opf & REQ_PREFLUSH)) {
                        bio_endio(bio);
                        return BLK_QC_T_NONE;
                }
        }

        /* Read requests must be <= 256KB due to NVMe's 64 bit completion
         * bitmap constraint. Writes can be of arbitrary size.
         */
        if (bio_data_dir(bio) == READ) {
                blk_queue_split(q, &bio);
                pblk_submit_read(pblk, bio);
        } else {
                /* Prevent deadlock in the case of a modest LUN configuration
                 * and large user I/Os. Unless stalled, the rate limiter
                 * leaves at least 256KB available for user I/O.
                 */
                if (pblk_get_secs(bio) > pblk_rl_max_io(&pblk->rl))
                        blk_queue_split(q, &bio);

                pblk_write_to_cache(pblk, bio, PBLK_IOTYPE_USER);
        }

        return BLK_QC_T_NONE;
}

static size_t pblk_trans_map_size(struct pblk *pblk)
{
        int entry_size = 8;

        if (pblk->addrf_len < 32)
                entry_size = 4;

        return entry_size * pblk->capacity;
}
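
/*
 * Example sizing (assumed numbers, for illustration only): a target
 * exposing 256M logical sectors needs a 2 GB translation map with
 * 8-byte entries, or 1 GB when the device address format fits in
 * 32 bits (addrf_len < 32).
 */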

#ifdef CONFIG_NVM_PBLK_DEBUG
static u32 pblk_l2p_crc(struct pblk *pblk)
{
        size_t map_size;
        u32 crc = ~(u32)0;

        map_size = pblk_trans_map_size(pblk);
        crc = crc32_le(crc, pblk->trans_map, map_size);
        return crc;
}
#endif

static void pblk_l2p_free(struct pblk *pblk)
{
        vfree(pblk->trans_map);
}

static int pblk_l2p_recover(struct pblk *pblk, bool factory_init)
{
        struct pblk_line *line = NULL;

        if (factory_init) {
                guid_gen(&pblk->instance_uuid);
        } else {
                line = pblk_recov_l2p(pblk);
                if (IS_ERR(line)) {
                        pblk_err(pblk, "could not recover l2p table\n");
                        return -EFAULT;
                }
        }

#ifdef CONFIG_NVM_PBLK_DEBUG
        pblk_info(pblk, "init: L2P CRC: %x\n", pblk_l2p_crc(pblk));
#endif

        /* Free full lines directly as GC has not been started yet */
        pblk_gc_free_full_lines(pblk);

        if (!line) {
                /* Configure next line for user data */
                line = pblk_line_get_first_data(pblk);
                if (!line)
                        return -EFAULT;
        }

        return 0;
}

static int pblk_l2p_init(struct pblk *pblk, bool factory_init)
{
        sector_t i;
        struct ppa_addr ppa;
        size_t map_size;
        int ret = 0;

        map_size = pblk_trans_map_size(pblk);
        pblk->trans_map = __vmalloc(map_size, GFP_KERNEL | __GFP_NOWARN
                                    | __GFP_RETRY_MAYFAIL | __GFP_HIGHMEM,
                                    PAGE_KERNEL);
        if (!pblk->trans_map) {
                pblk_err(pblk, "failed to allocate L2P (need %zu of memory)\n",
                                map_size);
                return -ENOMEM;
        }

        pblk_ppa_set_empty(&ppa);

        for (i = 0; i < pblk->capacity; i++)
                pblk_trans_map_set(pblk, i, ppa);

        ret = pblk_l2p_recover(pblk, factory_init);
        if (ret)
                vfree(pblk->trans_map);

        return ret;
}
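
/*
 * Note on the allocation above: __GFP_RETRY_MAYFAIL makes a huge L2P
 * allocation try hard but fail cleanly instead of invoking the OOM
 * killer, __GFP_NOWARN suppresses the generic failure warning (we print
 * our own), and __GFP_HIGHMEM lets the table use highmem pages on
 * 32-bit systems, since vmalloc maps the pages anyway.
 */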

static void pblk_rwb_free(struct pblk *pblk)
{
        if (pblk_rb_tear_down_check(&pblk->rwb))
                pblk_err(pblk, "write buffer error on tear down\n");

        pblk_rb_free(&pblk->rwb);
}

static int pblk_rwb_init(struct pblk *pblk)
{
        struct nvm_tgt_dev *dev = pblk->dev;
        struct nvm_geo *geo = &dev->geo;
        unsigned long buffer_size;
        int pgs_in_buffer, threshold;

        threshold = geo->mw_cunits * geo->all_luns;
        pgs_in_buffer = (max(geo->mw_cunits, geo->ws_opt) + geo->ws_opt)
                                                                * geo->all_luns;

        if (write_buffer_size && (write_buffer_size > pgs_in_buffer))
                buffer_size = write_buffer_size;
        else
                buffer_size = pgs_in_buffer;

        return pblk_rb_init(&pblk->rwb, buffer_size, threshold, geo->csecs);
}
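
/*
 * Example sizing (assumed geometry): with mw_cunits = 8, ws_opt = 4 and
 * all_luns = 64, the ring buffer gets (max(8, 4) + 4) * 64 = 768 entries
 * and a flush threshold of 8 * 64 = 512 entries. The write_buffer_size
 * module parameter can only grow the buffer, never shrink it.
 */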

static int pblk_set_addrf_12(struct pblk *pblk, struct nvm_geo *geo,
                             struct nvm_addrf_12 *dst)
{
        struct nvm_addrf_12 *src = (struct nvm_addrf_12 *)&geo->addrf;
        int power_len;

        /* Re-calculate channel and lun format to adapt to configuration */
        power_len = get_count_order(geo->num_ch);
        if (1 << power_len != geo->num_ch) {
                pblk_err(pblk, "supports only power-of-two channel config.\n");
                return -EINVAL;
        }
        dst->ch_len = power_len;

        power_len = get_count_order(geo->num_lun);
        if (1 << power_len != geo->num_lun) {
                pblk_err(pblk, "supports only power-of-two LUN config.\n");
                return -EINVAL;
        }
        dst->lun_len = power_len;

        dst->blk_len = src->blk_len;
        dst->pg_len = src->pg_len;
        dst->pln_len = src->pln_len;
        dst->sec_len = src->sec_len;

        dst->sec_offset = 0;
        dst->pln_offset = dst->sec_len;
        dst->ch_offset = dst->pln_offset + dst->pln_len;
        dst->lun_offset = dst->ch_offset + dst->ch_len;
        dst->pg_offset = dst->lun_offset + dst->lun_len;
        dst->blk_offset = dst->pg_offset + dst->pg_len;

        dst->sec_mask = ((1ULL << dst->sec_len) - 1) << dst->sec_offset;
        dst->pln_mask = ((1ULL << dst->pln_len) - 1) << dst->pln_offset;
        dst->ch_mask = ((1ULL << dst->ch_len) - 1) << dst->ch_offset;
        dst->lun_mask = ((1ULL << dst->lun_len) - 1) << dst->lun_offset;
        dst->pg_mask = ((1ULL << dst->pg_len) - 1) << dst->pg_offset;
        dst->blk_mask = ((1ULL << dst->blk_len) - 1) << dst->blk_offset;

        return dst->blk_offset + src->blk_len;
}
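
/*
 * The resulting 1.2 format packs fields from low to high bits as
 * sector | plane | channel | lun | page | block. For example (assumed
 * geometry), sec_len = 2, pln_len = 2, 8 channels and 8 LUNs (3 bits
 * each), pg_len = 9 and blk_len = 12 yield a 31-bit address.
 */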

static int pblk_set_addrf_20(struct nvm_geo *geo, struct nvm_addrf *adst,
                             struct pblk_addrf *udst)
{
        struct nvm_addrf *src = &geo->addrf;

        adst->ch_len = get_count_order(geo->num_ch);
        adst->lun_len = get_count_order(geo->num_lun);
        adst->chk_len = src->chk_len;
        adst->sec_len = src->sec_len;

        adst->sec_offset = 0;
        adst->ch_offset = adst->sec_len;
        adst->lun_offset = adst->ch_offset + adst->ch_len;
        adst->chk_offset = adst->lun_offset + adst->lun_len;

        adst->sec_mask = ((1ULL << adst->sec_len) - 1) << adst->sec_offset;
        adst->chk_mask = ((1ULL << adst->chk_len) - 1) << adst->chk_offset;
        adst->lun_mask = ((1ULL << adst->lun_len) - 1) << adst->lun_offset;
        adst->ch_mask = ((1ULL << adst->ch_len) - 1) << adst->ch_offset;

        udst->sec_stripe = geo->ws_opt;
        udst->ch_stripe = geo->num_ch;
        udst->lun_stripe = geo->num_lun;

        udst->sec_lun_stripe = udst->sec_stripe * udst->ch_stripe;
        udst->sec_ws_stripe = udst->sec_lun_stripe * udst->lun_stripe;

        return adst->chk_offset + adst->chk_len;
}
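
/*
 * For 2.0 devices the format is sector | channel | lun | chunk (low to
 * high), and pblk_addrf records the striping used when mapping logical
 * sectors onto it: ws_opt sectors at a time, striped first across
 * channels and then across LUNs.
 */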

static int pblk_set_addrf(struct pblk *pblk)
{
        struct nvm_tgt_dev *dev = pblk->dev;
        struct nvm_geo *geo = &dev->geo;
        int mod;

        switch (geo->version) {
        case NVM_OCSSD_SPEC_12:
                div_u64_rem(geo->clba, pblk->min_write_pgs, &mod);
                if (mod) {
                        pblk_err(pblk, "bad configuration of sectors/pages\n");
                        return -EINVAL;
                }

                pblk->addrf_len = pblk_set_addrf_12(pblk, geo,
                                                        (void *)&pblk->addrf);
                break;
        case NVM_OCSSD_SPEC_20:
                pblk->addrf_len = pblk_set_addrf_20(geo, (void *)&pblk->addrf,
                                                        &pblk->uaddrf);
                break;
        default:
                pblk_err(pblk, "OCSSD revision not supported (%d)\n",
                                geo->version);
                return -EINVAL;
        }

        return 0;
}

static int pblk_create_global_caches(void)
{
        pblk_caches.ws = kmem_cache_create("pblk_blk_ws",
                                sizeof(struct pblk_line_ws), 0, 0, NULL);
        if (!pblk_caches.ws)
                return -ENOMEM;

        pblk_caches.rec = kmem_cache_create("pblk_rec",
                                sizeof(struct pblk_rec_ctx), 0, 0, NULL);
        if (!pblk_caches.rec)
                goto fail_destroy_ws;

        pblk_caches.g_rq = kmem_cache_create("pblk_g_rq", pblk_g_rq_size,
                                0, 0, NULL);
        if (!pblk_caches.g_rq)
                goto fail_destroy_rec;

        pblk_caches.w_rq = kmem_cache_create("pblk_w_rq", pblk_w_rq_size,
                                0, 0, NULL);
        if (!pblk_caches.w_rq)
                goto fail_destroy_g_rq;

        return 0;

fail_destroy_g_rq:
        kmem_cache_destroy(pblk_caches.g_rq);
fail_destroy_rec:
        kmem_cache_destroy(pblk_caches.rec);
fail_destroy_ws:
        kmem_cache_destroy(pblk_caches.ws);

        return -ENOMEM;
}

static int pblk_get_global_caches(void)
{
        int ret = 0;

        mutex_lock(&pblk_caches.mutex);

        if (kref_get_unless_zero(&pblk_caches.kref))
                goto out;

        ret = pblk_create_global_caches();
        if (!ret)
                kref_init(&pblk_caches.kref);

out:
        mutex_unlock(&pblk_caches.mutex);
        return ret;
}
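
/*
 * kref_get_unless_zero() succeeds only while at least one instance
 * already holds the caches; the first caller falls through, creates
 * them and re-arms the kref at 1. Taking the mutex around both the
 * kref check and the creation keeps racing instances safe against
 * pblk_put_global_caches() below.
 */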

static void pblk_destroy_global_caches(struct kref *ref)
{
        struct pblk_global_caches *c;

        c = container_of(ref, struct pblk_global_caches, kref);

        kmem_cache_destroy(c->ws);
        kmem_cache_destroy(c->rec);
        kmem_cache_destroy(c->g_rq);
        kmem_cache_destroy(c->w_rq);
}

static void pblk_put_global_caches(void)
{
        mutex_lock(&pblk_caches.mutex);
        kref_put(&pblk_caches.kref, pblk_destroy_global_caches);
        mutex_unlock(&pblk_caches.mutex);
}

static int pblk_core_init(struct pblk *pblk)
{
        struct nvm_tgt_dev *dev = pblk->dev;
        struct nvm_geo *geo = &dev->geo;
        int ret, max_write_ppas;

        atomic64_set(&pblk->user_wa, 0);
        atomic64_set(&pblk->pad_wa, 0);
        atomic64_set(&pblk->gc_wa, 0);
        pblk->user_rst_wa = 0;
        pblk->pad_rst_wa = 0;
        pblk->gc_rst_wa = 0;

        atomic64_set(&pblk->nr_flush, 0);
        pblk->nr_flush_rst = 0;

        pblk->min_write_pgs = geo->ws_opt;
        pblk->min_write_pgs_data = pblk->min_write_pgs;
        max_write_ppas = pblk->min_write_pgs * geo->all_luns;
        pblk->max_write_pgs = min_t(int, max_write_ppas, NVM_MAX_VLBA);
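        /* queue_max_hw_sectors() is reported in 512-byte sectors, so
         * convert the device sector size via (geo->csecs >> SECTOR_SHIFT)
         * to cap one write request at what the queue accepts per command.
         */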
        pblk->max_write_pgs = min_t(int, pblk->max_write_pgs,
                queue_max_hw_sectors(dev->q) / (geo->csecs >> SECTOR_SHIFT));
        pblk_set_sec_per_write(pblk, pblk->min_write_pgs);

        pblk->oob_meta_size = geo->sos;
        if (!pblk_is_oob_meta_supported(pblk)) {
                /* For drives which do not have the OOB metadata feature,
                 * we need so-called packed metadata in order to support
                 * recovery. Packed metadata stores the same information
                 * as OOB metadata (the l2p table mapping), but in the
                 * form of a single page at the end of every write request.
                 */
                if (pblk->min_write_pgs
                        * sizeof(struct pblk_sec_meta) > PAGE_SIZE) {
                        /* We want to keep all the packed metadata on a
                         * single page per write request, so we need to
                         * ensure that it will fit.
                         *
                         * This is more of a sanity check, since there is
                         * no device with such a big minimal write size
                         * (above 1 megabyte).
                         */
                        pblk_err(pblk, "Not supported min write size\n");
                        return -EINVAL;
                }
                /* For the packed metadata approach we simplify the read
                 * path: we always issue requests of size max_write_pgs,
                 * with all pages filled with user payload except the last
                 * one, which holds the packed metadata.
                 */
                pblk->max_write_pgs = pblk->min_write_pgs;
                pblk->min_write_pgs_data = pblk->min_write_pgs - 1;
        }

        pblk->pad_dist = kcalloc(pblk->min_write_pgs - 1, sizeof(atomic64_t),
                                 GFP_KERNEL);
        if (!pblk->pad_dist)
                return -ENOMEM;

        if (pblk_get_global_caches())
                goto fail_free_pad_dist;

        /* Internal bios can be at most the sectors signaled by the device. */
        ret = mempool_init_page_pool(&pblk->page_bio_pool, NVM_MAX_VLBA, 0);
        if (ret)
                goto free_global_caches;

        ret = mempool_init_slab_pool(&pblk->gen_ws_pool, PBLK_GEN_WS_POOL_SIZE,
                                     pblk_caches.ws);
        if (ret)
                goto free_page_bio_pool;

        ret = mempool_init_slab_pool(&pblk->rec_pool, geo->all_luns,
                                     pblk_caches.rec);
        if (ret)
                goto free_gen_ws_pool;

        ret = mempool_init_slab_pool(&pblk->r_rq_pool, geo->all_luns,
                                     pblk_caches.g_rq);
        if (ret)
                goto free_rec_pool;

        ret = mempool_init_slab_pool(&pblk->e_rq_pool, geo->all_luns,
                                     pblk_caches.g_rq);
        if (ret)
                goto free_r_rq_pool;

        ret = mempool_init_slab_pool(&pblk->w_rq_pool, geo->all_luns,
                                     pblk_caches.w_rq);
        if (ret)
                goto free_e_rq_pool;

        pblk->close_wq = alloc_workqueue("pblk-close-wq",
                        WQ_MEM_RECLAIM | WQ_UNBOUND, PBLK_NR_CLOSE_JOBS);
        if (!pblk->close_wq)
                goto free_w_rq_pool;

        pblk->bb_wq = alloc_workqueue("pblk-bb-wq",
                        WQ_MEM_RECLAIM | WQ_UNBOUND, 0);
        if (!pblk->bb_wq)
                goto free_close_wq;

        pblk->r_end_wq = alloc_workqueue("pblk-read-end-wq",
                        WQ_MEM_RECLAIM | WQ_UNBOUND, 0);
        if (!pblk->r_end_wq)
                goto free_bb_wq;

        if (pblk_set_addrf(pblk))
                goto free_r_end_wq;

        INIT_LIST_HEAD(&pblk->compl_list);
        INIT_LIST_HEAD(&pblk->resubmit_list);

        return 0;

free_r_end_wq:
        destroy_workqueue(pblk->r_end_wq);
free_bb_wq:
        destroy_workqueue(pblk->bb_wq);
free_close_wq:
        destroy_workqueue(pblk->close_wq);
free_w_rq_pool:
        mempool_exit(&pblk->w_rq_pool);
free_e_rq_pool:
        mempool_exit(&pblk->e_rq_pool);
free_r_rq_pool:
        mempool_exit(&pblk->r_rq_pool);
free_rec_pool:
        mempool_exit(&pblk->rec_pool);
free_gen_ws_pool:
        mempool_exit(&pblk->gen_ws_pool);
free_page_bio_pool:
        mempool_exit(&pblk->page_bio_pool);
free_global_caches:
        pblk_put_global_caches();
fail_free_pad_dist:
        kfree(pblk->pad_dist);
        return -ENOMEM;
}

static void pblk_core_free(struct pblk *pblk)
{
        if (pblk->close_wq)
                destroy_workqueue(pblk->close_wq);

        if (pblk->r_end_wq)
                destroy_workqueue(pblk->r_end_wq);

        if (pblk->bb_wq)
                destroy_workqueue(pblk->bb_wq);

        mempool_exit(&pblk->page_bio_pool);
        mempool_exit(&pblk->gen_ws_pool);
        mempool_exit(&pblk->rec_pool);
        mempool_exit(&pblk->r_rq_pool);
        mempool_exit(&pblk->e_rq_pool);
        mempool_exit(&pblk->w_rq_pool);

        pblk_put_global_caches();
        kfree(pblk->pad_dist);
}

static void pblk_line_mg_free(struct pblk *pblk)
{
        struct pblk_line_mgmt *l_mg = &pblk->l_mg;
        int i;

        kfree(l_mg->bb_template);
        kfree(l_mg->bb_aux);
        kfree(l_mg->vsc_list);

        for (i = 0; i < PBLK_DATA_LINES; i++) {
                kfree(l_mg->sline_meta[i]);
                kvfree(l_mg->eline_meta[i]->buf);
                kfree(l_mg->eline_meta[i]);
        }

        mempool_destroy(l_mg->bitmap_pool);
        kmem_cache_destroy(l_mg->bitmap_cache);
}

static void pblk_line_meta_free(struct pblk_line_mgmt *l_mg,
                                struct pblk_line *line)
{
        struct pblk_w_err_gc *w_err_gc = line->w_err_gc;

        kfree(line->blk_bitmap);
        kfree(line->erase_bitmap);
        kfree(line->chks);

        kvfree(w_err_gc->lba_list);
        kfree(w_err_gc);
}

static void pblk_lines_free(struct pblk *pblk)
{
        struct pblk_line_mgmt *l_mg = &pblk->l_mg;
        struct pblk_line *line;
        int i;

        for (i = 0; i < l_mg->nr_lines; i++) {
                line = &pblk->lines[i];

                pblk_line_free(line);
                pblk_line_meta_free(l_mg, line);
        }

        pblk_line_mg_free(pblk);

        kfree(pblk->luns);
        kfree(pblk->lines);
}

static int pblk_luns_init(struct pblk *pblk)
{
        struct nvm_tgt_dev *dev = pblk->dev;
        struct nvm_geo *geo = &dev->geo;
        struct pblk_lun *rlun;
        int i;

        /* TODO: Implement unbalanced LUN support */
        if (geo->num_lun < 0) {
                pblk_err(pblk, "unbalanced LUN config.\n");
                return -EINVAL;
        }

        pblk->luns = kcalloc(geo->all_luns, sizeof(struct pblk_lun),
                             GFP_KERNEL);
        if (!pblk->luns)
                return -ENOMEM;

        for (i = 0; i < geo->all_luns; i++) {
                /* Stripe across channels */
                int ch = i % geo->num_ch;
                int lun_raw = i / geo->num_ch;
                int lunid = lun_raw + ch * geo->num_lun;

                rlun = &pblk->luns[i];
                rlun->bppa = dev->luns[lunid];

                sema_init(&rlun->wr_sem, 1);
        }

        return 0;
}

/* See comment over struct line_emeta definition */
static unsigned int calc_emeta_len(struct pblk *pblk)
{
        struct pblk_line_meta *lm = &pblk->lm;
        struct pblk_line_mgmt *l_mg = &pblk->l_mg;
        struct nvm_tgt_dev *dev = pblk->dev;
        struct nvm_geo *geo = &dev->geo;

        /* Round to sector size so that lba_list starts on its own sector */
        lm->emeta_sec[1] = DIV_ROUND_UP(
                        sizeof(struct line_emeta) + lm->blk_bitmap_len +
                        sizeof(struct wa_counters), geo->csecs);
        lm->emeta_len[1] = lm->emeta_sec[1] * geo->csecs;

        /* Round to sector size so that vsc_list starts on its own sector */
        lm->dsec_per_line = lm->sec_per_line - lm->emeta_sec[0];
        lm->emeta_sec[2] = DIV_ROUND_UP(lm->dsec_per_line * sizeof(u64),
                        geo->csecs);
        lm->emeta_len[2] = lm->emeta_sec[2] * geo->csecs;

        lm->emeta_sec[3] = DIV_ROUND_UP(l_mg->nr_lines * sizeof(u32),
                        geo->csecs);
        lm->emeta_len[3] = lm->emeta_sec[3] * geo->csecs;

        lm->vsc_list_len = l_mg->nr_lines * sizeof(u32);

        return (lm->emeta_len[1] + lm->emeta_len[2] + lm->emeta_len[3]);
}
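
/*
 * emeta is thus split into three sector-aligned regions: the header
 * with the bad block bitmap and write amplification counters, the lba
 * list (one u64 per data sector in the line), and the vsc list (one
 * u32 per line).
 */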

static int pblk_set_provision(struct pblk *pblk, int nr_free_chks)
{
        struct nvm_tgt_dev *dev = pblk->dev;
        struct pblk_line_mgmt *l_mg = &pblk->l_mg;
        struct pblk_line_meta *lm = &pblk->lm;
        struct nvm_geo *geo = &dev->geo;
        sector_t provisioned;
        int sec_meta, blk_meta, clba;
        int minimum;

        if (geo->op == NVM_TARGET_DEFAULT_OP)
                pblk->op = PBLK_DEFAULT_OP;
        else
                pblk->op = geo->op;

        minimum = pblk_get_min_chks(pblk);
        provisioned = nr_free_chks;
        provisioned *= (100 - pblk->op);
        sector_div(provisioned, 100);

        if ((nr_free_chks - provisioned) < minimum) {
                if (geo->op != NVM_TARGET_DEFAULT_OP) {
                        pblk_err(pblk, "OP too small to create a sane instance\n");
                        return -EINTR;
                }

                /* If the user did not specify an OP value, and PBLK_DEFAULT_OP
                 * is not enough, calculate and set sane value
                 */

                provisioned = nr_free_chks - minimum;
                pblk->op = (100 * minimum) / nr_free_chks;
                pblk_info(pblk, "Default OP insufficient, adjusting OP to %d\n",
                                pblk->op);
        }

        pblk->op_blks = nr_free_chks - provisioned;

        /* Internally pblk manages all free blocks, but all calculations based
         * on user capacity consider only provisioned blocks
         */
        pblk->rl.total_blocks = nr_free_chks;

        /* Consider sectors used for metadata */
        sec_meta = (lm->smeta_sec + lm->emeta_sec[0]) * l_mg->nr_free_lines;
        blk_meta = DIV_ROUND_UP(sec_meta, geo->clba);

        clba = (geo->clba / pblk->min_write_pgs) * pblk->min_write_pgs_data;
        pblk->capacity = (provisioned - blk_meta) * clba;

        atomic_set(&pblk->rl.free_blocks, nr_free_chks);
        atomic_set(&pblk->rl.free_user_blocks, nr_free_chks);

        return 0;
}
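
/*
 * Worked example (assumed numbers): with nr_free_chks = 1000 and
 * pblk->op = 11 (i.e. 11% over-provisioning), provisioned =
 * 1000 * 89 / 100 = 890 chunks back user capacity while op_blks = 110
 * chunks stay reserved, before the further deduction for per-line
 * metadata sectors.
 */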

static int pblk_setup_line_meta_chk(struct pblk *pblk, struct pblk_line *line,
                                    struct nvm_chk_meta *meta)
{
        struct nvm_tgt_dev *dev = pblk->dev;
        struct nvm_geo *geo = &dev->geo;
        struct pblk_line_meta *lm = &pblk->lm;
        int i, nr_bad_chks = 0;

        for (i = 0; i < lm->blk_per_line; i++) {
                struct pblk_lun *rlun = &pblk->luns[i];
                struct nvm_chk_meta *chunk;
                struct nvm_chk_meta *chunk_meta;
                struct ppa_addr ppa;
                int pos;

                ppa = rlun->bppa;
                pos = pblk_ppa_to_pos(geo, ppa);
                chunk = &line->chks[pos];

                ppa.m.chk = line->id;
                chunk_meta = pblk_chunk_get_off(pblk, meta, ppa);

                chunk->state = chunk_meta->state;
                chunk->type = chunk_meta->type;
                chunk->wi = chunk_meta->wi;
                chunk->slba = chunk_meta->slba;
                chunk->cnlb = chunk_meta->cnlb;
                chunk->wp = chunk_meta->wp;

                trace_pblk_chunk_state(pblk_disk_name(pblk), &ppa,
                                        chunk->state);

                if (chunk->type & NVM_CHK_TP_SZ_SPEC) {
                        WARN_ONCE(1, "pblk: custom-sized chunks unsupported\n");
                        continue;
                }

                if (!(chunk->state & NVM_CHK_ST_OFFLINE))
                        continue;

                set_bit(pos, line->blk_bitmap);
                nr_bad_chks++;
        }

        return nr_bad_chks;
}

static long pblk_setup_line_meta(struct pblk *pblk, struct pblk_line *line,
                                 void *chunk_meta, int line_id)
{
        struct pblk_line_mgmt *l_mg = &pblk->l_mg;
        struct pblk_line_meta *lm = &pblk->lm;
        long nr_bad_chks, chk_in_line;

        line->pblk = pblk;
        line->id = line_id;
        line->type = PBLK_LINETYPE_FREE;
        line->state = PBLK_LINESTATE_NEW;
        line->gc_group = PBLK_LINEGC_NONE;
        line->vsc = &l_mg->vsc_list[line_id];
        spin_lock_init(&line->lock);

        nr_bad_chks = pblk_setup_line_meta_chk(pblk, line, chunk_meta);

        chk_in_line = lm->blk_per_line - nr_bad_chks;
        if (nr_bad_chks < 0 || nr_bad_chks > lm->blk_per_line ||
            chk_in_line < lm->min_blk_line) {
                line->state = PBLK_LINESTATE_BAD;
                list_add_tail(&line->list, &l_mg->bad_list);
                return 0;
        }

        atomic_set(&line->blk_in_line, chk_in_line);
        list_add_tail(&line->list, &l_mg->free_list);
        l_mg->nr_free_lines++;

        return chk_in_line;
}

static int pblk_alloc_line_meta(struct pblk *pblk, struct pblk_line *line)
{
        struct pblk_line_meta *lm = &pblk->lm;

        line->blk_bitmap = kzalloc(lm->blk_bitmap_len, GFP_KERNEL);
        if (!line->blk_bitmap)
                return -ENOMEM;

        line->erase_bitmap = kzalloc(lm->blk_bitmap_len, GFP_KERNEL);
        if (!line->erase_bitmap)
                goto free_blk_bitmap;

        line->chks = kmalloc_array(lm->blk_per_line,
                                   sizeof(struct nvm_chk_meta), GFP_KERNEL);
        if (!line->chks)
                goto free_erase_bitmap;

        line->w_err_gc = kzalloc(sizeof(struct pblk_w_err_gc), GFP_KERNEL);
        if (!line->w_err_gc)
                goto free_chks;

        return 0;

free_chks:
        kfree(line->chks);
free_erase_bitmap:
        kfree(line->erase_bitmap);
free_blk_bitmap:
        kfree(line->blk_bitmap);
        return -ENOMEM;
}

static int pblk_line_mg_init(struct pblk *pblk)
{
        struct nvm_tgt_dev *dev = pblk->dev;
        struct nvm_geo *geo = &dev->geo;
        struct pblk_line_mgmt *l_mg = &pblk->l_mg;
        struct pblk_line_meta *lm = &pblk->lm;
        int i, bb_distance;

        l_mg->nr_lines = geo->num_chk;
        l_mg->log_line = l_mg->data_line = NULL;
        l_mg->l_seq_nr = l_mg->d_seq_nr = 0;
        l_mg->nr_free_lines = 0;
        bitmap_zero(&l_mg->meta_bitmap, PBLK_DATA_LINES);

        INIT_LIST_HEAD(&l_mg->free_list);
        INIT_LIST_HEAD(&l_mg->corrupt_list);
        INIT_LIST_HEAD(&l_mg->bad_list);
        INIT_LIST_HEAD(&l_mg->gc_full_list);
        INIT_LIST_HEAD(&l_mg->gc_high_list);
        INIT_LIST_HEAD(&l_mg->gc_mid_list);
        INIT_LIST_HEAD(&l_mg->gc_low_list);
        INIT_LIST_HEAD(&l_mg->gc_empty_list);
        INIT_LIST_HEAD(&l_mg->gc_werr_list);

        INIT_LIST_HEAD(&l_mg->emeta_list);

        l_mg->gc_lists[0] = &l_mg->gc_werr_list;
        l_mg->gc_lists[1] = &l_mg->gc_high_list;
        l_mg->gc_lists[2] = &l_mg->gc_mid_list;
        l_mg->gc_lists[3] = &l_mg->gc_low_list;

        spin_lock_init(&l_mg->free_lock);
        spin_lock_init(&l_mg->close_lock);
        spin_lock_init(&l_mg->gc_lock);

        l_mg->vsc_list = kcalloc(l_mg->nr_lines, sizeof(__le32), GFP_KERNEL);
        if (!l_mg->vsc_list)
                goto fail;

        l_mg->bb_template = kzalloc(lm->sec_bitmap_len, GFP_KERNEL);
        if (!l_mg->bb_template)
                goto fail_free_vsc_list;

        l_mg->bb_aux = kzalloc(lm->sec_bitmap_len, GFP_KERNEL);
        if (!l_mg->bb_aux)
                goto fail_free_bb_template;

        /* smeta is always small enough to fit on a kmalloc memory allocation,
         * emeta depends on the number of LUNs allocated to the pblk instance
         */
        for (i = 0; i < PBLK_DATA_LINES; i++) {
                l_mg->sline_meta[i] = kmalloc(lm->smeta_len, GFP_KERNEL);
                if (!l_mg->sline_meta[i])
                        goto fail_free_smeta;
        }

        l_mg->bitmap_cache = kmem_cache_create("pblk_lm_bitmap",
                        lm->sec_bitmap_len, 0, 0, NULL);
        if (!l_mg->bitmap_cache)
                goto fail_free_smeta;

        /* the bitmap pool is used for both valid and map bitmaps */
        l_mg->bitmap_pool = mempool_create_slab_pool(PBLK_DATA_LINES * 2,
                                l_mg->bitmap_cache);
        if (!l_mg->bitmap_pool)
                goto fail_destroy_bitmap_cache;

        /* emeta allocates three different buffers for managing metadata with
         * in-memory and in-media layouts
         */
        for (i = 0; i < PBLK_DATA_LINES; i++) {
                struct pblk_emeta *emeta;

                emeta = kmalloc(sizeof(struct pblk_emeta), GFP_KERNEL);
                if (!emeta)
                        goto fail_free_emeta;

                emeta->buf = kvmalloc(lm->emeta_len[0], GFP_KERNEL);
                if (!emeta->buf) {
                        kfree(emeta);
                        goto fail_free_emeta;
                }

                emeta->nr_entries = lm->emeta_sec[0];
                l_mg->eline_meta[i] = emeta;
        }

        for (i = 0; i < l_mg->nr_lines; i++)
                l_mg->vsc_list[i] = cpu_to_le32(EMPTY_ENTRY);

        bb_distance = (geo->all_luns) * geo->ws_opt;
        for (i = 0; i < lm->sec_per_line; i += bb_distance)
                bitmap_set(l_mg->bb_template, i, geo->ws_opt);

        return 0;

fail_free_emeta:
        while (--i >= 0) {
                kvfree(l_mg->eline_meta[i]->buf);
                kfree(l_mg->eline_meta[i]);
        }

        mempool_destroy(l_mg->bitmap_pool);
fail_destroy_bitmap_cache:
        kmem_cache_destroy(l_mg->bitmap_cache);
fail_free_smeta:
        for (i = 0; i < PBLK_DATA_LINES; i++)
                kfree(l_mg->sline_meta[i]);
        kfree(l_mg->bb_aux);
fail_free_bb_template:
        kfree(l_mg->bb_template);
fail_free_vsc_list:
        kfree(l_mg->vsc_list);
fail:
        return -ENOMEM;
}

static int pblk_line_meta_init(struct pblk *pblk)
{
        struct nvm_tgt_dev *dev = pblk->dev;
        struct nvm_geo *geo = &dev->geo;
        struct pblk_line_meta *lm = &pblk->lm;
        unsigned int smeta_len, emeta_len;
        int i;

        lm->sec_per_line = geo->clba * geo->all_luns;
        lm->blk_per_line = geo->all_luns;
        lm->blk_bitmap_len = BITS_TO_LONGS(geo->all_luns) * sizeof(long);
        lm->sec_bitmap_len = BITS_TO_LONGS(lm->sec_per_line) * sizeof(long);
        lm->lun_bitmap_len = BITS_TO_LONGS(geo->all_luns) * sizeof(long);
        lm->mid_thrs = lm->sec_per_line / 2;
        lm->high_thrs = lm->sec_per_line / 4;
        lm->meta_distance = (geo->all_luns / 2) * pblk->min_write_pgs;

        /* Calculate necessary pages for smeta. See comment over struct
         * line_smeta definition
         */
        i = 1;
add_smeta_page:
        lm->smeta_sec = i * geo->ws_opt;
        lm->smeta_len = lm->smeta_sec * geo->csecs;

        smeta_len = sizeof(struct line_smeta) + lm->lun_bitmap_len;
        if (smeta_len > lm->smeta_len) {
                i++;
                goto add_smeta_page;
        }

        /* Calculate necessary pages for emeta. See comment over struct
         * line_emeta definition
         */
        i = 1;
add_emeta_page:
        lm->emeta_sec[0] = i * geo->ws_opt;
        lm->emeta_len[0] = lm->emeta_sec[0] * geo->csecs;

        emeta_len = calc_emeta_len(pblk);
        if (emeta_len > lm->emeta_len[0]) {
                i++;
                goto add_emeta_page;
        }

        lm->emeta_bb = geo->all_luns > i ? geo->all_luns - i : 0;

        lm->min_blk_line = 1;
        if (geo->all_luns > 1)
                lm->min_blk_line += DIV_ROUND_UP(lm->smeta_sec +
                                        lm->emeta_sec[0], geo->clba);

        if (lm->min_blk_line > lm->blk_per_line) {
                pblk_err(pblk, "config. not supported. Min. LUN in line:%d\n",
                                lm->blk_per_line);
                return -EINVAL;
        }

        return 0;
}
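
/*
 * min_blk_line is the smallest number of good chunks a usable line may
 * contain: one chunk of data plus, when more than one LUN is present,
 * however many chunks (clba sectors each) are needed to hold the line's
 * smeta and emeta regions.
 */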

static int pblk_lines_init(struct pblk *pblk)
{
        struct pblk_line_mgmt *l_mg = &pblk->l_mg;
        struct pblk_line *line;
        void *chunk_meta;
        int nr_free_chks = 0;
        int i, ret;

        ret = pblk_line_meta_init(pblk);
        if (ret)
                return ret;

        ret = pblk_line_mg_init(pblk);
        if (ret)
                return ret;

        ret = pblk_luns_init(pblk);
        if (ret)
                goto fail_free_meta;

        chunk_meta = pblk_get_chunk_meta(pblk);
        if (IS_ERR(chunk_meta)) {
                ret = PTR_ERR(chunk_meta);
                goto fail_free_luns;
        }

        pblk->lines = kcalloc(l_mg->nr_lines, sizeof(struct pblk_line),
                              GFP_KERNEL);
        if (!pblk->lines) {
                ret = -ENOMEM;
                goto fail_free_chunk_meta;
        }

        for (i = 0; i < l_mg->nr_lines; i++) {
                line = &pblk->lines[i];

                ret = pblk_alloc_line_meta(pblk, line);
                if (ret)
                        goto fail_free_lines;

                nr_free_chks += pblk_setup_line_meta(pblk, line, chunk_meta, i);

                trace_pblk_line_state(pblk_disk_name(pblk), line->id,
                                line->state);
        }

        if (!nr_free_chks) {
                pblk_err(pblk, "too many bad blocks prevent for sane instance\n");
                ret = -EINTR;
                goto fail_free_lines;
        }

        ret = pblk_set_provision(pblk, nr_free_chks);
        if (ret)
                goto fail_free_lines;

        vfree(chunk_meta);
        return 0;

fail_free_lines:
        while (--i >= 0)
                pblk_line_meta_free(l_mg, &pblk->lines[i]);
        kfree(pblk->lines);
fail_free_chunk_meta:
        vfree(chunk_meta);
fail_free_luns:
        kfree(pblk->luns);
fail_free_meta:
        pblk_line_mg_free(pblk);

        return ret;
}

static int pblk_writer_init(struct pblk *pblk)
{
        pblk->writer_ts = kthread_create(pblk_write_ts, pblk, "pblk-writer-t");
        if (IS_ERR(pblk->writer_ts)) {
                int err = PTR_ERR(pblk->writer_ts);

                if (err != -EINTR)
                        pblk_err(pblk, "could not allocate writer kthread (%d)\n",
                                        err);
                return err;
        }

        timer_setup(&pblk->wtimer, pblk_write_timer_fn, 0);
        mod_timer(&pblk->wtimer, jiffies + msecs_to_jiffies(100));

        return 0;
}

static void pblk_writer_stop(struct pblk *pblk)
{
        /* The pipeline must be stopped and the write buffer emptied before the
         * write thread is stopped
         */
        WARN(pblk_rb_read_count(&pblk->rwb),
                        "Stopping not fully persisted write buffer\n");

        WARN(pblk_rb_sync_count(&pblk->rwb),
                        "Stopping not fully synced write buffer\n");

        del_timer_sync(&pblk->wtimer);
        if (pblk->writer_ts)
                kthread_stop(pblk->writer_ts);
}

static void pblk_free(struct pblk *pblk)
{
        pblk_lines_free(pblk);
        pblk_l2p_free(pblk);
        pblk_rwb_free(pblk);
        pblk_core_free(pblk);

        kfree(pblk);
}

static void pblk_tear_down(struct pblk *pblk, bool graceful)
{
        if (graceful)
                __pblk_pipeline_flush(pblk);
        __pblk_pipeline_stop(pblk);
        pblk_writer_stop(pblk);
        pblk_rb_sync_l2p(&pblk->rwb);
        pblk_rl_free(&pblk->rl);

        pblk_debug(pblk, "consistent tear down (graceful:%d)\n", graceful);
}

static void pblk_exit(void *private, bool graceful)
{
        struct pblk *pblk = private;

        pblk_gc_exit(pblk, graceful);
        pblk_tear_down(pblk, graceful);

#ifdef CONFIG_NVM_PBLK_DEBUG
        pblk_info(pblk, "exit: L2P CRC: %x\n", pblk_l2p_crc(pblk));
#endif

        pblk_free(pblk);
}

static sector_t pblk_capacity(void *private)
{
        struct pblk *pblk = private;

        return pblk->capacity * NR_PHY_IN_LOG;
}

static void *pblk_init(struct nvm_tgt_dev *dev, struct gendisk *tdisk,
                       int flags)
{
        struct nvm_geo *geo = &dev->geo;
        struct request_queue *bqueue = dev->q;
        struct request_queue *tqueue = tdisk->queue;
        struct pblk *pblk;
        int ret;

        pblk = kzalloc(sizeof(struct pblk), GFP_KERNEL);
        if (!pblk)
                return ERR_PTR(-ENOMEM);

        pblk->dev = dev;
        pblk->disk = tdisk;
        pblk->state = PBLK_STATE_RUNNING;
        trace_pblk_state(pblk_disk_name(pblk), pblk->state);
        pblk->gc.gc_enabled = 0;

        if (!(geo->version == NVM_OCSSD_SPEC_12 ||
                                        geo->version == NVM_OCSSD_SPEC_20)) {
                pblk_err(pblk, "OCSSD version not supported (%u)\n",
                                geo->version);
                kfree(pblk);
                return ERR_PTR(-EINVAL);
        }

        if (geo->ext) {
                pblk_err(pblk, "extended metadata not supported\n");
                kfree(pblk);
                return ERR_PTR(-EINVAL);
        }

        spin_lock_init(&pblk->resubmit_lock);
        spin_lock_init(&pblk->trans_lock);
        spin_lock_init(&pblk->lock);

#ifdef CONFIG_NVM_PBLK_DEBUG
        atomic_long_set(&pblk->inflight_writes, 0);
        atomic_long_set(&pblk->padded_writes, 0);
        atomic_long_set(&pblk->padded_wb, 0);
        atomic_long_set(&pblk->req_writes, 0);
        atomic_long_set(&pblk->sub_writes, 0);
        atomic_long_set(&pblk->sync_writes, 0);
        atomic_long_set(&pblk->inflight_reads, 0);
        atomic_long_set(&pblk->cache_reads, 0);
        atomic_long_set(&pblk->sync_reads, 0);
        atomic_long_set(&pblk->recov_writes, 0);
        atomic_long_set(&pblk->recov_gc_writes, 0);
        atomic_long_set(&pblk->recov_gc_reads, 0);
#endif

        atomic_long_set(&pblk->read_failed, 0);
        atomic_long_set(&pblk->read_empty, 0);
        atomic_long_set(&pblk->read_high_ecc, 0);
        atomic_long_set(&pblk->read_failed_gc, 0);
        atomic_long_set(&pblk->write_failed, 0);
        atomic_long_set(&pblk->erase_failed, 0);

        ret = pblk_core_init(pblk);
        if (ret) {
                pblk_err(pblk, "could not initialize core\n");
                goto fail;
        }

        ret = pblk_lines_init(pblk);
        if (ret) {
                pblk_err(pblk, "could not initialize lines\n");
                goto fail_free_core;
        }

        ret = pblk_rwb_init(pblk);
        if (ret) {
                pblk_err(pblk, "could not initialize write buffer\n");
                goto fail_free_lines;
        }

        ret = pblk_l2p_init(pblk, flags & NVM_TARGET_FACTORY);
        if (ret) {
                pblk_err(pblk, "could not initialize maps\n");
                goto fail_free_rwb;
        }

        ret = pblk_writer_init(pblk);
        if (ret) {
                if (ret != -EINTR)
                        pblk_err(pblk, "could not initialize write thread\n");
                goto fail_free_l2p;
        }

        ret = pblk_gc_init(pblk);
        if (ret) {
                pblk_err(pblk, "could not initialize gc\n");
                goto fail_stop_writer;
        }

        /* inherit the size from the underlying device */
        blk_queue_logical_block_size(tqueue, queue_physical_block_size(bqueue));
        blk_queue_max_hw_sectors(tqueue, queue_max_hw_sectors(bqueue));

        blk_queue_write_cache(tqueue, true, false);

        tqueue->limits.discard_granularity = geo->clba * geo->csecs;
        tqueue->limits.discard_alignment = 0;
        blk_queue_max_discard_sectors(tqueue, UINT_MAX >> 9);
        blk_queue_flag_set(QUEUE_FLAG_DISCARD, tqueue);

        pblk_info(pblk, "luns:%u, lines:%d, secs:%llu, buf entries:%u\n",
                        geo->all_luns, pblk->l_mg.nr_lines,
                        (unsigned long long)pblk->capacity,
                        pblk->rwb.nr_entries);

        wake_up_process(pblk->writer_ts);

        /* Check if we need to start GC */
        pblk_gc_should_kick(pblk);

        return pblk;

fail_stop_writer:
        pblk_writer_stop(pblk);
fail_free_l2p:
        pblk_l2p_free(pblk);
fail_free_rwb:
        pblk_rwb_free(pblk);
fail_free_lines:
        pblk_lines_free(pblk);
fail_free_core:
        pblk_core_free(pblk);
fail:
        kfree(pblk);
        return ERR_PTR(ret);
}

/* physical block device target */
static struct nvm_tgt_type tt_pblk = {
        .name           = "pblk",
        .version        = {1, 0, 0},

        .make_rq        = pblk_make_rq,
        .capacity       = pblk_capacity,

        .init           = pblk_init,
        .exit           = pblk_exit,

        .sysfs_init     = pblk_sysfs_init,
        .sysfs_exit     = pblk_sysfs_exit,
        .owner          = THIS_MODULE,
};
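
/*
 * A pblk target instance is created from user space through the
 * LightNVM ioctl interface, which resolves the "pblk" type name
 * registered above and ends up calling pblk_init(); e.g. with
 * nvme-cli (illustrative invocation, exact flags may differ):
 *
 *   nvme lnvm create -d nvme0n1 -n pblk0 -t pblk
 */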

static int __init pblk_module_init(void)
{
        int ret;

        ret = bioset_init(&pblk_bio_set, BIO_POOL_SIZE, 0, 0);
        if (ret)
                return ret;
        ret = nvm_register_tgt_type(&tt_pblk);
        if (ret)
                bioset_exit(&pblk_bio_set);
        return ret;
}

static void pblk_module_exit(void)
{
        bioset_exit(&pblk_bio_set);
        nvm_unregister_tgt_type(&tt_pblk);
}

module_init(pblk_module_init);
module_exit(pblk_module_exit);
MODULE_AUTHOR("Javier Gonzalez <javier@cnexlabs.com>");
MODULE_AUTHOR("Matias Bjorling <matias@cnexlabs.com>");
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Physical Block-Device for Open-Channel SSDs");