lightnvm: pblk: redesign GC algorithm
drivers/lightnvm/pblk-init.c
/*
 * Copyright (C) 2015 IT University of Copenhagen (rrpc.c)
 * Copyright (C) 2016 CNEX Labs
 * Initial release: Javier Gonzalez <javier@cnexlabs.com>
 *                  Matias Bjorling <matias@cnexlabs.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * Implementation of a physical block-device target for Open-channel SSDs.
 *
 * pblk-init.c - pblk's initialization.
 */

#include "pblk.h"

static struct kmem_cache *pblk_blk_ws_cache, *pblk_rec_cache, *pblk_g_rq_cache,
				*pblk_w_rq_cache, *pblk_line_meta_cache;
static DECLARE_RWSEM(pblk_lock);
struct bio_set *pblk_bio_set;

static int pblk_rw_io(struct request_queue *q, struct pblk *pblk,
			  struct bio *bio)
{
	int ret;

	/* Read requests must be <= 256kb due to NVMe's 64 bit completion bitmap
	 * constraint. Writes can be of arbitrary size.
	 */
	if (bio_data_dir(bio) == READ) {
		blk_queue_split(q, &bio);
		ret = pblk_submit_read(pblk, bio);
		if (ret == NVM_IO_DONE && bio_flagged(bio, BIO_CLONED))
			bio_put(bio);

		return ret;
	}

	/* Prevent deadlock in the case of a modest LUN configuration and large
	 * user I/Os. Unless stalled, the rate limiter leaves at least 256KB
	 * available for user I/O.
	 */
	if (unlikely(pblk_get_secs(bio) >= pblk_rl_sysfs_rate_show(&pblk->rl)))
		blk_queue_split(q, &bio);

	return pblk_write_to_cache(pblk, bio, PBLK_IOTYPE_USER);
}

static blk_qc_t pblk_make_rq(struct request_queue *q, struct bio *bio)
{
	struct pblk *pblk = q->queuedata;

	if (bio_op(bio) == REQ_OP_DISCARD) {
		pblk_discard(pblk, bio);
		if (!(bio->bi_opf & REQ_PREFLUSH)) {
			bio_endio(bio);
			return BLK_QC_T_NONE;
		}
	}

	switch (pblk_rw_io(q, pblk, bio)) {
	case NVM_IO_ERR:
		bio_io_error(bio);
		break;
	case NVM_IO_DONE:
		bio_endio(bio);
		break;
	}

	return BLK_QC_T_NONE;
}

static void pblk_l2p_free(struct pblk *pblk)
{
	vfree(pblk->trans_map);
}

static int pblk_l2p_init(struct pblk *pblk)
{
	sector_t i;
	struct ppa_addr ppa;
	int entry_size = 8;

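	/* Each L2P entry holds one packed ppa; 4-byte entries suffice when
	 * the device address format fits in 32 bits, 8-byte otherwise.
	 */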
	if (pblk->ppaf_bitsize < 32)
		entry_size = 4;

	pblk->trans_map = vmalloc(entry_size * pblk->rl.nr_secs);
	if (!pblk->trans_map)
		return -ENOMEM;

	pblk_ppa_set_empty(&ppa);

	for (i = 0; i < pblk->rl.nr_secs; i++)
		pblk_trans_map_set(pblk, i, ppa);

	return 0;
}

static void pblk_rwb_free(struct pblk *pblk)
{
	if (pblk_rb_tear_down_check(&pblk->rwb))
		pr_err("pblk: write buffer error on tear down\n");

	pblk_rb_data_free(&pblk->rwb);
	vfree(pblk_rb_entries_ref(&pblk->rwb));
}

static int pblk_rwb_init(struct pblk *pblk)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	struct pblk_rb_entry *entries;
	unsigned long nr_entries;
	unsigned int power_size, power_seg_sz;

	nr_entries = pblk_rb_calculate_size(pblk->pgs_in_buffer);

	entries = vzalloc(nr_entries * sizeof(struct pblk_rb_entry));
	if (!entries)
		return -ENOMEM;

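	/* The write buffer works on power-of-two sizes; pass the orders
	 * (log2) of the entry count and of the device sector size.
	 */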
	power_size = get_count_order(nr_entries);
	power_seg_sz = get_count_order(geo->sec_size);

	return pblk_rb_init(&pblk->rwb, entries, power_size, power_seg_sz);
}

/* Minimum pages needed within a lun */
#define PAGE_POOL_SIZE 16
#define ADDR_POOL_SIZE 64

static int pblk_set_ppaf(struct pblk *pblk)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	struct nvm_addr_format ppaf = geo->ppaf;
	int power_len;

	/* Re-calculate channel and lun format to adapt to configuration */
	power_len = get_count_order(geo->nr_chnls);
	if (1 << power_len != geo->nr_chnls) {
		pr_err("pblk: supports only power-of-two channel config.\n");
		return -EINVAL;
	}
	ppaf.ch_len = power_len;

	power_len = get_count_order(geo->luns_per_chnl);
	if (1 << power_len != geo->luns_per_chnl) {
		pr_err("pblk: supports only power-of-two LUN config.\n");
		return -EINVAL;
	}
	ppaf.lun_len = power_len;

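	/* Derive bit offsets and masks for pblk's internal ppa layout,
	 * packed low to high as: sector, plane, channel, lun, page, block.
	 */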
	pblk->ppaf.sec_offset = 0;
	pblk->ppaf.pln_offset = ppaf.sect_len;
	pblk->ppaf.ch_offset = pblk->ppaf.pln_offset + ppaf.pln_len;
	pblk->ppaf.lun_offset = pblk->ppaf.ch_offset + ppaf.ch_len;
	pblk->ppaf.pg_offset = pblk->ppaf.lun_offset + ppaf.lun_len;
	pblk->ppaf.blk_offset = pblk->ppaf.pg_offset + ppaf.pg_len;
	pblk->ppaf.sec_mask = (1ULL << ppaf.sect_len) - 1;
	pblk->ppaf.pln_mask = ((1ULL << ppaf.pln_len) - 1) <<
							pblk->ppaf.pln_offset;
	pblk->ppaf.ch_mask = ((1ULL << ppaf.ch_len) - 1) <<
							pblk->ppaf.ch_offset;
	pblk->ppaf.lun_mask = ((1ULL << ppaf.lun_len) - 1) <<
							pblk->ppaf.lun_offset;
	pblk->ppaf.pg_mask = ((1ULL << ppaf.pg_len) - 1) <<
							pblk->ppaf.pg_offset;
	pblk->ppaf.blk_mask = ((1ULL << ppaf.blk_len) - 1) <<
							pblk->ppaf.blk_offset;

	pblk->ppaf_bitsize = pblk->ppaf.blk_offset + ppaf.blk_len;

	return 0;
}

static int pblk_init_global_caches(struct pblk *pblk)
{
	char cache_name[PBLK_CACHE_NAME_LEN];

	down_write(&pblk_lock);
	pblk_blk_ws_cache = kmem_cache_create("pblk_blk_ws",
				sizeof(struct pblk_line_ws), 0, 0, NULL);
	if (!pblk_blk_ws_cache) {
		up_write(&pblk_lock);
		return -ENOMEM;
	}

	pblk_rec_cache = kmem_cache_create("pblk_rec",
				sizeof(struct pblk_rec_ctx), 0, 0, NULL);
	if (!pblk_rec_cache) {
		kmem_cache_destroy(pblk_blk_ws_cache);
		up_write(&pblk_lock);
		return -ENOMEM;
	}

	pblk_g_rq_cache = kmem_cache_create("pblk_g_rq", pblk_g_rq_size,
				0, 0, NULL);
	if (!pblk_g_rq_cache) {
		kmem_cache_destroy(pblk_blk_ws_cache);
		kmem_cache_destroy(pblk_rec_cache);
		up_write(&pblk_lock);
		return -ENOMEM;
	}

	pblk_w_rq_cache = kmem_cache_create("pblk_w_rq", pblk_w_rq_size,
				0, 0, NULL);
	if (!pblk_w_rq_cache) {
		kmem_cache_destroy(pblk_blk_ws_cache);
		kmem_cache_destroy(pblk_rec_cache);
		kmem_cache_destroy(pblk_g_rq_cache);
		up_write(&pblk_lock);
		return -ENOMEM;
	}

	snprintf(cache_name, sizeof(cache_name), "pblk_line_m_%s",
				pblk->disk->disk_name);
	pblk_line_meta_cache = kmem_cache_create(cache_name,
				pblk->lm.sec_bitmap_len, 0, 0, NULL);
	if (!pblk_line_meta_cache) {
		kmem_cache_destroy(pblk_blk_ws_cache);
		kmem_cache_destroy(pblk_rec_cache);
		kmem_cache_destroy(pblk_g_rq_cache);
		kmem_cache_destroy(pblk_w_rq_cache);
		up_write(&pblk_lock);
		return -ENOMEM;
	}
	up_write(&pblk_lock);

	return 0;
}

static int pblk_core_init(struct pblk *pblk)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;

	pblk->pgs_in_buffer = NVM_MEM_PAGE_WRITE * geo->sec_per_pg *
						geo->nr_planes * geo->nr_luns;

	if (pblk_init_global_caches(pblk))
		return -ENOMEM;

	pblk->page_pool = mempool_create_page_pool(PAGE_POOL_SIZE, 0);
	if (!pblk->page_pool)
		return -ENOMEM;

	pblk->line_ws_pool = mempool_create_slab_pool(geo->nr_luns,
							pblk_blk_ws_cache);
	if (!pblk->line_ws_pool)
		goto free_page_pool;

	pblk->rec_pool = mempool_create_slab_pool(geo->nr_luns, pblk_rec_cache);
	if (!pblk->rec_pool)
		goto free_blk_ws_pool;

	pblk->g_rq_pool = mempool_create_slab_pool(64, pblk_g_rq_cache);
	if (!pblk->g_rq_pool)
		goto free_rec_pool;

	pblk->w_rq_pool = mempool_create_slab_pool(64, pblk_w_rq_cache);
	if (!pblk->w_rq_pool)
		goto free_g_rq_pool;

	pblk->line_meta_pool =
			mempool_create_slab_pool(16, pblk_line_meta_cache);
	if (!pblk->line_meta_pool)
		goto free_w_rq_pool;

	pblk->kw_wq = alloc_workqueue("pblk-aux-wq",
					WQ_MEM_RECLAIM | WQ_UNBOUND, 1);
	if (!pblk->kw_wq)
		goto free_line_meta_pool;

	if (pblk_set_ppaf(pblk))
		goto free_kw_wq;

	if (pblk_rwb_init(pblk))
		goto free_kw_wq;

	INIT_LIST_HEAD(&pblk->compl_list);
	return 0;

free_kw_wq:
	destroy_workqueue(pblk->kw_wq);
free_line_meta_pool:
	mempool_destroy(pblk->line_meta_pool);
free_w_rq_pool:
	mempool_destroy(pblk->w_rq_pool);
free_g_rq_pool:
	mempool_destroy(pblk->g_rq_pool);
free_rec_pool:
	mempool_destroy(pblk->rec_pool);
free_blk_ws_pool:
	mempool_destroy(pblk->line_ws_pool);
free_page_pool:
	mempool_destroy(pblk->page_pool);
	return -ENOMEM;
}

static void pblk_core_free(struct pblk *pblk)
{
	if (pblk->kw_wq)
		destroy_workqueue(pblk->kw_wq);

	mempool_destroy(pblk->page_pool);
	mempool_destroy(pblk->line_ws_pool);
	mempool_destroy(pblk->rec_pool);
	mempool_destroy(pblk->g_rq_pool);
	mempool_destroy(pblk->w_rq_pool);
	mempool_destroy(pblk->line_meta_pool);

	kmem_cache_destroy(pblk_blk_ws_cache);
	kmem_cache_destroy(pblk_rec_cache);
	kmem_cache_destroy(pblk_g_rq_cache);
	kmem_cache_destroy(pblk_w_rq_cache);
	kmem_cache_destroy(pblk_line_meta_cache);
}

static void pblk_luns_free(struct pblk *pblk)
{
	kfree(pblk->luns);
}

static void pblk_free_line_bitmaps(struct pblk_line *line)
{
	kfree(line->blk_bitmap);
	kfree(line->erase_bitmap);
}

static void pblk_lines_free(struct pblk *pblk)
{
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	struct pblk_line *line;
	int i;

	spin_lock(&l_mg->free_lock);
	for (i = 0; i < l_mg->nr_lines; i++) {
		line = &pblk->lines[i];

		pblk_line_free(pblk, line);
		pblk_free_line_bitmaps(line);
	}
	spin_unlock(&l_mg->free_lock);
}

static void pblk_line_meta_free(struct pblk *pblk)
{
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	int i;

	kfree(l_mg->bb_template);
	kfree(l_mg->bb_aux);
	kfree(l_mg->vsc_list);

	for (i = 0; i < PBLK_DATA_LINES; i++) {
		kfree(l_mg->sline_meta[i]);
		pblk_mfree(l_mg->eline_meta[i]->buf, l_mg->emeta_alloc_type);
		kfree(l_mg->eline_meta[i]);
	}

	kfree(pblk->lines);
}

static int pblk_bb_discovery(struct nvm_tgt_dev *dev, struct pblk_lun *rlun)
{
	struct nvm_geo *geo = &dev->geo;
	struct ppa_addr ppa;
	u8 *blks;
	int nr_blks, ret;

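	/* The device reports the bad-block table per plane; fetch it for this
	 * (channel, LUN) pair and fold it below into one entry per block.
	 */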
	nr_blks = geo->blks_per_lun * geo->plane_mode;
	blks = kmalloc(nr_blks, GFP_KERNEL);
	if (!blks)
		return -ENOMEM;

	ppa.ppa = 0;
	ppa.g.ch = rlun->bppa.g.ch;
	ppa.g.lun = rlun->bppa.g.lun;

	ret = nvm_get_tgt_bb_tbl(dev, ppa, blks);
	if (ret)
		goto out;

	nr_blks = nvm_bb_tbl_fold(dev->parent, blks, nr_blks);
	if (nr_blks < 0) {
		ret = nr_blks;
		goto out;
	}

	rlun->bb_list = blks;

	return 0;
out:
	kfree(blks);
	return ret;
}

static int pblk_bb_line(struct pblk *pblk, struct pblk_line *line,
			int blk_per_line)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	struct pblk_lun *rlun;
	int bb_cnt = 0;
	int i;

	for (i = 0; i < blk_per_line; i++) {
		rlun = &pblk->luns[i];
		if (rlun->bb_list[line->id] == NVM_BLK_T_FREE)
			continue;

		set_bit(pblk_ppa_to_pos(geo, rlun->bppa), line->blk_bitmap);
		bb_cnt++;
	}

	return bb_cnt;
}

static int pblk_alloc_line_bitmaps(struct pblk *pblk, struct pblk_line *line)
{
	struct pblk_line_meta *lm = &pblk->lm;

	line->blk_bitmap = kzalloc(lm->blk_bitmap_len, GFP_KERNEL);
	if (!line->blk_bitmap)
		return -ENOMEM;

	line->erase_bitmap = kzalloc(lm->blk_bitmap_len, GFP_KERNEL);
	if (!line->erase_bitmap) {
		kfree(line->blk_bitmap);
		return -ENOMEM;
	}

	return 0;
}

static int pblk_luns_init(struct pblk *pblk, struct ppa_addr *luns)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	struct pblk_lun *rlun;
	int i, ret;

	/* TODO: Implement unbalanced LUN support */
	if (geo->luns_per_chnl < 0) {
		pr_err("pblk: unbalanced LUN config.\n");
		return -EINVAL;
	}

	pblk->luns = kcalloc(geo->nr_luns, sizeof(struct pblk_lun), GFP_KERNEL);
	if (!pblk->luns)
		return -ENOMEM;

	for (i = 0; i < geo->nr_luns; i++) {
		/* Stripe across channels */
		int ch = i % geo->nr_chnls;
		int lun_raw = i / geo->nr_chnls;
		int lunid = lun_raw + ch * geo->luns_per_chnl;
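		/* e.g. 2 channels x 2 LUNs, assuming luns[] is ordered
		 * channel by channel: i=0 -> ch0/lun0, i=1 -> ch1/lun0,
		 * i=2 -> ch0/lun1, i=3 -> ch1/lun1
		 */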

		rlun = &pblk->luns[i];
		rlun->bppa = luns[lunid];

		sema_init(&rlun->wr_sem, 1);

		ret = pblk_bb_discovery(dev, rlun);
		if (ret) {
			while (--i >= 0)
				kfree(pblk->luns[i].bb_list);
			return ret;
		}
	}

	return 0;
}

static int pblk_lines_configure(struct pblk *pblk, int flags)
{
	struct pblk_line *line = NULL;
	int ret = 0;

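	/* Unless this is a factory initialization, recover the L2P table from
	 * the device and resume on the recovered data line; otherwise take
	 * the first free line for user data.
	 */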
	if (!(flags & NVM_TARGET_FACTORY)) {
		line = pblk_recov_l2p(pblk);
		if (IS_ERR(line)) {
			pr_err("pblk: could not recover l2p table\n");
			ret = -EFAULT;
		}
	}

	if (!line) {
		/* Configure next line for user data */
		line = pblk_line_get_first_data(pblk);
		if (!line) {
			pr_err("pblk: line list corrupted\n");
			ret = -EFAULT;
		}
	}

	return ret;
}

/* See comment over struct line_emeta definition */
static unsigned int calc_emeta_len(struct pblk *pblk)
{
	struct pblk_line_meta *lm = &pblk->lm;
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;

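	/* emeta is made up of three areas: the header plus bad-block bitmap,
	 * the per-data-sector lba_list and the per-line vsc_list.
	 */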
	/* Round to sector size so that lba_list starts on its own sector */
	lm->emeta_sec[1] = DIV_ROUND_UP(
			sizeof(struct line_emeta) + lm->blk_bitmap_len,
			geo->sec_size);
	lm->emeta_len[1] = lm->emeta_sec[1] * geo->sec_size;

	/* Round to sector size so that vsc_list starts on its own sector */
	lm->dsec_per_line = lm->sec_per_line - lm->emeta_sec[0];
	lm->emeta_sec[2] = DIV_ROUND_UP(lm->dsec_per_line * sizeof(u64),
			geo->sec_size);
	lm->emeta_len[2] = lm->emeta_sec[2] * geo->sec_size;

	lm->emeta_sec[3] = DIV_ROUND_UP(l_mg->nr_lines * sizeof(u32),
			geo->sec_size);
	lm->emeta_len[3] = lm->emeta_sec[3] * geo->sec_size;

	lm->vsc_list_len = l_mg->nr_lines * sizeof(u32);

	return (lm->emeta_len[1] + lm->emeta_len[2] + lm->emeta_len[3]);
}

static void pblk_set_provision(struct pblk *pblk, long nr_free_blks)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	sector_t provisioned;

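	/* Keep a fixed 20% of the free blocks as over-provisioning; only the
	 * remaining blocks are exposed as user capacity.
	 */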
	pblk->over_pct = 20;

	provisioned = nr_free_blks;
	provisioned *= (100 - pblk->over_pct);
	sector_div(provisioned, 100);

	/* Internally pblk manages all free blocks, but all calculations based
	 * on user capacity consider only provisioned blocks
	 */
	pblk->rl.total_blocks = nr_free_blks;
	pblk->rl.nr_secs = nr_free_blks * geo->sec_per_blk;
	pblk->capacity = provisioned * geo->sec_per_blk;
	atomic_set(&pblk->rl.free_blocks, nr_free_blks);
}

static int pblk_lines_alloc_metadata(struct pblk *pblk)
{
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	struct pblk_line_meta *lm = &pblk->lm;
	int i;

	/* smeta is always small enough to fit on a kmalloc memory allocation,
	 * emeta depends on the number of LUNs allocated to the pblk instance
	 */
	for (i = 0; i < PBLK_DATA_LINES; i++) {
		l_mg->sline_meta[i] = kmalloc(lm->smeta_len, GFP_KERNEL);
		if (!l_mg->sline_meta[i])
			goto fail_free_smeta;
	}

	/* emeta allocates three different buffers for managing metadata with
	 * in-memory and in-media layouts
	 */
	for (i = 0; i < PBLK_DATA_LINES; i++) {
		struct pblk_emeta *emeta;

		emeta = kmalloc(sizeof(struct pblk_emeta), GFP_KERNEL);
		if (!emeta)
			goto fail_free_emeta;

		if (lm->emeta_len[0] > KMALLOC_MAX_CACHE_SIZE) {
			l_mg->emeta_alloc_type = PBLK_VMALLOC_META;

			emeta->buf = vmalloc(lm->emeta_len[0]);
			if (!emeta->buf) {
				kfree(emeta);
				goto fail_free_emeta;
			}

			emeta->nr_entries = lm->emeta_sec[0];
			l_mg->eline_meta[i] = emeta;
		} else {
			l_mg->emeta_alloc_type = PBLK_KMALLOC_META;

			emeta->buf = kmalloc(lm->emeta_len[0], GFP_KERNEL);
			if (!emeta->buf) {
				kfree(emeta);
				goto fail_free_emeta;
			}

			emeta->nr_entries = lm->emeta_sec[0];
			l_mg->eline_meta[i] = emeta;
		}
	}

	l_mg->vsc_list = kcalloc(l_mg->nr_lines, sizeof(__le32), GFP_KERNEL);
	if (!l_mg->vsc_list)
		goto fail_free_emeta;

	for (i = 0; i < l_mg->nr_lines; i++)
		l_mg->vsc_list[i] = cpu_to_le32(EMPTY_ENTRY);

	return 0;

fail_free_emeta:
	while (--i >= 0) {
		/* buf may be kmalloc'd or vmalloc'd; free it accordingly */
		pblk_mfree(l_mg->eline_meta[i]->buf, l_mg->emeta_alloc_type);
		kfree(l_mg->eline_meta[i]);
	}

fail_free_smeta:
	for (i = 0; i < PBLK_DATA_LINES; i++)
		kfree(l_mg->sline_meta[i]);

	return -ENOMEM;
}

static int pblk_lines_init(struct pblk *pblk)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	struct pblk_line_meta *lm = &pblk->lm;
	struct pblk_line *line;
	unsigned int smeta_len, emeta_len;
	long nr_bad_blks, nr_free_blks;
	int bb_distance, max_write_ppas, mod;
	int i, ret;

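	/* The minimum write unit covers one page across all planes of a LUN;
	 * the maximum stripes that unit across all LUNs, capped by the
	 * device's max_phys_sect limit per command.
	 */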
	pblk->min_write_pgs = geo->sec_per_pl * (geo->sec_size / PAGE_SIZE);
	max_write_ppas = pblk->min_write_pgs * geo->nr_luns;
	pblk->max_write_pgs = (max_write_ppas < nvm_max_phys_sects(dev)) ?
				max_write_ppas : nvm_max_phys_sects(dev);
	pblk_set_sec_per_write(pblk, pblk->min_write_pgs);

	if (pblk->max_write_pgs > PBLK_MAX_REQ_ADDRS) {
		pr_err("pblk: cannot support device max_phys_sect\n");
		return -EINVAL;
	}

	div_u64_rem(geo->sec_per_blk, pblk->min_write_pgs, &mod);
	if (mod) {
		pr_err("pblk: bad configuration of sectors/pages\n");
		return -EINVAL;
	}

	l_mg->nr_lines = geo->blks_per_lun;
	l_mg->log_line = l_mg->data_line = NULL;
	l_mg->l_seq_nr = l_mg->d_seq_nr = 0;
	l_mg->nr_free_lines = 0;
	bitmap_zero(&l_mg->meta_bitmap, PBLK_DATA_LINES);

	lm->sec_per_line = geo->sec_per_blk * geo->nr_luns;
	lm->blk_per_line = geo->nr_luns;
	lm->blk_bitmap_len = BITS_TO_LONGS(geo->nr_luns) * sizeof(long);
	lm->sec_bitmap_len = BITS_TO_LONGS(lm->sec_per_line) * sizeof(long);
	lm->lun_bitmap_len = BITS_TO_LONGS(geo->nr_luns) * sizeof(long);
	lm->high_thrs = lm->sec_per_line / 2;
	lm->mid_thrs = lm->sec_per_line / 4;
	lm->meta_distance = (geo->nr_luns / 2) * pblk->min_write_pgs;

	/* Calculate necessary pages for smeta. See comment over struct
	 * line_smeta definition
	 */
	i = 1;
add_smeta_page:
	lm->smeta_sec = i * geo->sec_per_pl;
	lm->smeta_len = lm->smeta_sec * geo->sec_size;

	smeta_len = sizeof(struct line_smeta) + lm->lun_bitmap_len;
	if (smeta_len > lm->smeta_len) {
		i++;
		goto add_smeta_page;
	}

	/* Calculate necessary pages for emeta. See comment over struct
	 * line_emeta definition
	 */
	i = 1;
add_emeta_page:
	lm->emeta_sec[0] = i * geo->sec_per_pl;
	lm->emeta_len[0] = lm->emeta_sec[0] * geo->sec_size;

	emeta_len = calc_emeta_len(pblk);
	if (emeta_len > lm->emeta_len[0]) {
		i++;
		goto add_emeta_page;
	}

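	/* A usable line needs enough good blocks to hold smeta and emeta plus
	 * at least one block of user data.
	 */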
	lm->emeta_bb = geo->nr_luns - i;
	lm->min_blk_line = 1 + DIV_ROUND_UP(lm->smeta_sec + lm->emeta_sec[0],
							geo->sec_per_blk);

	ret = pblk_lines_alloc_metadata(pblk);
	if (ret)
		goto fail;

	l_mg->bb_template = kzalloc(lm->sec_bitmap_len, GFP_KERNEL);
	if (!l_mg->bb_template) {
		ret = -ENOMEM;
		goto fail_free_meta;
	}

	l_mg->bb_aux = kzalloc(lm->sec_bitmap_len, GFP_KERNEL);
	if (!l_mg->bb_aux) {
		ret = -ENOMEM;
		goto fail_free_bb_template;
	}

	bb_distance = (geo->nr_luns) * geo->sec_per_pl;
	for (i = 0; i < lm->sec_per_line; i += bb_distance)
		bitmap_set(l_mg->bb_template, i, geo->sec_per_pl);

	INIT_LIST_HEAD(&l_mg->free_list);
	INIT_LIST_HEAD(&l_mg->corrupt_list);
	INIT_LIST_HEAD(&l_mg->bad_list);
	INIT_LIST_HEAD(&l_mg->gc_full_list);
	INIT_LIST_HEAD(&l_mg->gc_high_list);
	INIT_LIST_HEAD(&l_mg->gc_mid_list);
	INIT_LIST_HEAD(&l_mg->gc_low_list);
	INIT_LIST_HEAD(&l_mg->gc_empty_list);

	INIT_LIST_HEAD(&l_mg->emeta_list);

	l_mg->gc_lists[0] = &l_mg->gc_high_list;
	l_mg->gc_lists[1] = &l_mg->gc_mid_list;
	l_mg->gc_lists[2] = &l_mg->gc_low_list;

	spin_lock_init(&l_mg->free_lock);
	spin_lock_init(&l_mg->close_lock);
	spin_lock_init(&l_mg->gc_lock);

	pblk->lines = kcalloc(l_mg->nr_lines, sizeof(struct pblk_line),
								GFP_KERNEL);
	if (!pblk->lines) {
		ret = -ENOMEM;
		goto fail_free_bb_aux;
	}

	nr_free_blks = 0;
	for (i = 0; i < l_mg->nr_lines; i++) {
		int blk_in_line;

		line = &pblk->lines[i];

		line->pblk = pblk;
		line->id = i;
		line->type = PBLK_LINETYPE_FREE;
		line->state = PBLK_LINESTATE_FREE;
		line->gc_group = PBLK_LINEGC_NONE;
		line->vsc = &l_mg->vsc_list[i];
		spin_lock_init(&line->lock);

		ret = pblk_alloc_line_bitmaps(pblk, line);
		if (ret)
			goto fail_free_lines;

		nr_bad_blks = pblk_bb_line(pblk, line, lm->blk_per_line);
		if (nr_bad_blks < 0 || nr_bad_blks > lm->blk_per_line) {
			pblk_free_line_bitmaps(line);
			ret = -EINVAL;
			goto fail_free_lines;
		}

		blk_in_line = lm->blk_per_line - nr_bad_blks;
		if (blk_in_line < lm->min_blk_line) {
			line->state = PBLK_LINESTATE_BAD;
			list_add_tail(&line->list, &l_mg->bad_list);
			continue;
		}

		nr_free_blks += blk_in_line;
		atomic_set(&line->blk_in_line, blk_in_line);

		l_mg->nr_free_lines++;
		list_add_tail(&line->list, &l_mg->free_list);
	}

	pblk_set_provision(pblk, nr_free_blks);

	/* Cleanup per-LUN bad block lists - managed within lines on run-time */
	for (i = 0; i < geo->nr_luns; i++)
		kfree(pblk->luns[i].bb_list);

	return 0;
fail_free_lines:
	while (--i >= 0)
		pblk_free_line_bitmaps(&pblk->lines[i]);

	kfree(pblk->lines);
fail_free_bb_aux:
	kfree(l_mg->bb_aux);
fail_free_bb_template:
	kfree(l_mg->bb_template);
fail_free_meta:
	pblk_line_meta_free(pblk);
fail:
	for (i = 0; i < geo->nr_luns; i++)
		kfree(pblk->luns[i].bb_list);

	return ret;
}

static int pblk_writer_init(struct pblk *pblk)
{
	setup_timer(&pblk->wtimer, pblk_write_timer_fn, (unsigned long)pblk);
	mod_timer(&pblk->wtimer, jiffies + msecs_to_jiffies(100));

	pblk->writer_ts = kthread_create(pblk_write_ts, pblk, "pblk-writer-t");
	if (IS_ERR(pblk->writer_ts)) {
		pr_err("pblk: could not allocate writer kthread\n");
		return PTR_ERR(pblk->writer_ts);
	}

	return 0;
}

static void pblk_writer_stop(struct pblk *pblk)
{
	if (pblk->writer_ts)
		kthread_stop(pblk->writer_ts);
	del_timer(&pblk->wtimer);
}

static void pblk_free(struct pblk *pblk)
{
	pblk_luns_free(pblk);
	pblk_lines_free(pblk);
	pblk_line_meta_free(pblk);
	pblk_core_free(pblk);
	pblk_l2p_free(pblk);

	kfree(pblk);
}

static void pblk_tear_down(struct pblk *pblk)
{
	pblk_flush_writer(pblk);
	pblk_writer_stop(pblk);
	pblk_rb_sync_l2p(&pblk->rwb);
	pblk_recov_pad(pblk);
	pblk_rwb_free(pblk);
	pblk_rl_free(&pblk->rl);

	pr_debug("pblk: consistent tear down\n");
}

static void pblk_exit(void *private)
{
	struct pblk *pblk = private;

	down_write(&pblk_lock);
	pblk_gc_exit(pblk);
	pblk_tear_down(pblk);
	pblk_free(pblk);
	up_write(&pblk_lock);
}

static sector_t pblk_capacity(void *private)
{
	struct pblk *pblk = private;

	return pblk->capacity * NR_PHY_IN_LOG;
}

static void *pblk_init(struct nvm_tgt_dev *dev, struct gendisk *tdisk,
		       int flags)
{
	struct nvm_geo *geo = &dev->geo;
	struct request_queue *bqueue = dev->q;
	struct request_queue *tqueue = tdisk->queue;
	struct pblk *pblk;
	int ret;

	if (dev->identity.dom & NVM_RSP_L2P) {
		pr_err("pblk: device-side L2P table not supported. (%x)\n",
							dev->identity.dom);
		return ERR_PTR(-EINVAL);
	}

	pblk = kzalloc(sizeof(struct pblk), GFP_KERNEL);
	if (!pblk)
		return ERR_PTR(-ENOMEM);

	pblk->dev = dev;
	pblk->disk = tdisk;

	spin_lock_init(&pblk->trans_lock);
	spin_lock_init(&pblk->lock);

	if (flags & NVM_TARGET_FACTORY)
		pblk_setup_uuid(pblk);

#ifdef CONFIG_NVM_DEBUG
	atomic_long_set(&pblk->inflight_writes, 0);
	atomic_long_set(&pblk->padded_writes, 0);
	atomic_long_set(&pblk->padded_wb, 0);
	atomic_long_set(&pblk->nr_flush, 0);
	atomic_long_set(&pblk->req_writes, 0);
	atomic_long_set(&pblk->sub_writes, 0);
	atomic_long_set(&pblk->sync_writes, 0);
	atomic_long_set(&pblk->inflight_reads, 0);
	atomic_long_set(&pblk->cache_reads, 0);
	atomic_long_set(&pblk->sync_reads, 0);
	atomic_long_set(&pblk->recov_writes, 0);
	atomic_long_set(&pblk->recov_gc_writes, 0);
#endif

	atomic_long_set(&pblk->read_failed, 0);
	atomic_long_set(&pblk->read_empty, 0);
	atomic_long_set(&pblk->read_high_ecc, 0);
	atomic_long_set(&pblk->read_failed_gc, 0);
	atomic_long_set(&pblk->write_failed, 0);
	atomic_long_set(&pblk->erase_failed, 0);

	ret = pblk_luns_init(pblk, dev->luns);
	if (ret) {
		pr_err("pblk: could not initialize luns\n");
		goto fail;
	}

	ret = pblk_lines_init(pblk);
	if (ret) {
		pr_err("pblk: could not initialize lines\n");
		goto fail_free_luns;
	}

	ret = pblk_core_init(pblk);
	if (ret) {
		pr_err("pblk: could not initialize core\n");
		goto fail_free_line_meta;
	}

	ret = pblk_l2p_init(pblk);
	if (ret) {
		pr_err("pblk: could not initialize maps\n");
		goto fail_free_core;
	}

	ret = pblk_lines_configure(pblk, flags);
	if (ret) {
		pr_err("pblk: could not configure lines\n");
		goto fail_free_l2p;
	}

	ret = pblk_writer_init(pblk);
	if (ret) {
		pr_err("pblk: could not initialize write thread\n");
		goto fail_free_lines;
	}

	ret = pblk_gc_init(pblk);
	if (ret) {
		pr_err("pblk: could not initialize gc\n");
		goto fail_stop_writer;
	}

	/* inherit the size from the underlying device */
	blk_queue_logical_block_size(tqueue, queue_physical_block_size(bqueue));
	blk_queue_max_hw_sectors(tqueue, queue_max_hw_sectors(bqueue));

	blk_queue_write_cache(tqueue, true, false);

	tqueue->limits.discard_granularity = geo->pgs_per_blk * geo->pfpg_size;
	tqueue->limits.discard_alignment = 0;
	blk_queue_max_discard_sectors(tqueue, UINT_MAX >> 9);
	queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, tqueue);

	pr_info("pblk init: luns:%u, lines:%d, secs:%llu, buf entries:%u\n",
			geo->nr_luns, pblk->l_mg.nr_lines,
			(unsigned long long)pblk->rl.nr_secs,
			pblk->rwb.nr_entries);

	wake_up_process(pblk->writer_ts);
	return pblk;

fail_stop_writer:
	pblk_writer_stop(pblk);
fail_free_lines:
	pblk_lines_free(pblk);
fail_free_l2p:
	pblk_l2p_free(pblk);
fail_free_core:
	pblk_core_free(pblk);
fail_free_line_meta:
	pblk_line_meta_free(pblk);
fail_free_luns:
	pblk_luns_free(pblk);
fail:
	kfree(pblk);
	return ERR_PTR(ret);
}

/* physical block device target */
static struct nvm_tgt_type tt_pblk = {
	.name		= "pblk",
	.version	= {1, 0, 0},

	.make_rq	= pblk_make_rq,
	.capacity	= pblk_capacity,

	.init		= pblk_init,
	.exit		= pblk_exit,

	.sysfs_init	= pblk_sysfs_init,
	.sysfs_exit	= pblk_sysfs_exit,
};

static int __init pblk_module_init(void)
{
	int ret;

	pblk_bio_set = bioset_create(BIO_POOL_SIZE, 0, 0);
	if (!pblk_bio_set)
		return -ENOMEM;
	ret = nvm_register_tgt_type(&tt_pblk);
	if (ret)
		bioset_free(pblk_bio_set);
	return ret;
}

static void pblk_module_exit(void)
{
	bioset_free(pblk_bio_set);
	nvm_unregister_tgt_type(&tt_pblk);
}

module_init(pblk_module_init);
module_exit(pblk_module_exit);
MODULE_AUTHOR("Javier Gonzalez <javier@cnexlabs.com>");
MODULE_AUTHOR("Matias Bjorling <matias@cnexlabs.com>");
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Physical Block-Device for Open-Channel SSDs");