/* linux-block.git: drivers/lightnvm/pblk-init.c @ commit 27eb430958ff67b616447e68246172844ca2047e */
/*
 * Copyright (C) 2015 IT University of Copenhagen (rrpc.c)
 * Copyright (C) 2016 CNEX Labs
 * Initial release: Javier Gonzalez <javier@cnexlabs.com>
 *                  Matias Bjorling <matias@cnexlabs.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * Implementation of a physical block-device target for Open-channel SSDs.
 *
 * pblk-init.c - pblk's initialization.
 */

#include "pblk.h"

static struct kmem_cache *pblk_ws_cache, *pblk_rec_cache, *pblk_g_rq_cache,
                                *pblk_w_rq_cache;
static DECLARE_RWSEM(pblk_lock);
struct bio_set *pblk_bio_set;

static int pblk_rw_io(struct request_queue *q, struct pblk *pblk,
                          struct bio *bio)
{
        int ret;

        /* Read requests must be <= 256kb due to NVMe's 64 bit completion bitmap
         * constraint. Writes can be of arbitrary size.
         */
        if (bio_data_dir(bio) == READ) {
                blk_queue_split(q, &bio);
                ret = pblk_submit_read(pblk, bio);
                if (ret == NVM_IO_DONE && bio_flagged(bio, BIO_CLONED))
                        bio_put(bio);

                return ret;
        }

        /* Prevent deadlock in the case of a modest LUN configuration and large
         * user I/Os. Unless stalled, the rate limiter leaves at least 256KB
         * available for user I/O.
         */
        if (pblk_get_secs(bio) > pblk_rl_max_io(&pblk->rl))
                blk_queue_split(q, &bio);

        return pblk_write_to_cache(pblk, bio, PBLK_IOTYPE_USER);
}

static blk_qc_t pblk_make_rq(struct request_queue *q, struct bio *bio)
{
        struct pblk *pblk = q->queuedata;

        if (bio_op(bio) == REQ_OP_DISCARD) {
                pblk_discard(pblk, bio);
                if (!(bio->bi_opf & REQ_PREFLUSH)) {
                        bio_endio(bio);
                        return BLK_QC_T_NONE;
                }
        }

        switch (pblk_rw_io(q, pblk, bio)) {
        case NVM_IO_ERR:
                bio_io_error(bio);
                break;
        case NVM_IO_DONE:
                bio_endio(bio);
                break;
        }

        return BLK_QC_T_NONE;
}

static void pblk_l2p_free(struct pblk *pblk)
{
        vfree(pblk->trans_map);
}

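/*
 * Sizing note (illustrative figures, not from the original source): the L2P
 * table keeps one entry per exposed sector, 4 bytes wide when the packed
 * physical address fits in 32 bits and 8 bytes otherwise. A device exposing
 * 16M sectors therefore needs roughly 64 MiB or 128 MiB of vmalloc'd memory
 * for the table.
 */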
static int pblk_l2p_init(struct pblk *pblk)
{
        sector_t i;
        struct ppa_addr ppa;
        int entry_size = 8;

        if (pblk->ppaf_bitsize < 32)
                entry_size = 4;

        pblk->trans_map = vmalloc(entry_size * pblk->rl.nr_secs);
        if (!pblk->trans_map)
                return -ENOMEM;

        pblk_ppa_set_empty(&ppa);

        for (i = 0; i < pblk->rl.nr_secs; i++)
                pblk_trans_map_set(pblk, i, ppa);

        return 0;
}

static void pblk_rwb_free(struct pblk *pblk)
{
        if (pblk_rb_tear_down_check(&pblk->rwb))
                pr_err("pblk: write buffer error on tear down\n");

        pblk_rb_data_free(&pblk->rwb);
        vfree(pblk_rb_entries_ref(&pblk->rwb));
}

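/*
 * Note (assumption, not stated in the original source): the write buffer is
 * sized from pblk->pgs_in_buffer and handed to pblk_rb_init() as powers of
 * two via get_count_order(), which allows mask-based ring index arithmetic.
 * power_seg_sz describes the per-entry payload, one device sector of
 * geo->sec_size bytes.
 */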
static int pblk_rwb_init(struct pblk *pblk)
{
        struct nvm_tgt_dev *dev = pblk->dev;
        struct nvm_geo *geo = &dev->geo;
        struct pblk_rb_entry *entries;
        unsigned long nr_entries;
        unsigned int power_size, power_seg_sz;

        nr_entries = pblk_rb_calculate_size(pblk->pgs_in_buffer);

        entries = vzalloc(nr_entries * sizeof(struct pblk_rb_entry));
        if (!entries)
                return -ENOMEM;

        power_size = get_count_order(nr_entries);
        power_seg_sz = get_count_order(geo->sec_size);

        return pblk_rb_init(&pblk->rwb, entries, power_size, power_seg_sz);
}

/* Minimum pages needed within a lun */
#define ADDR_POOL_SIZE 64

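/*
 * Illustrative layout (example values, not from the original source): with
 * sect_len=2, pln_len=1, ch_len=3, lun_len=2, pg_len=8 and blk_len=12, the
 * target-side format computed below packs an address as
 *
 *   blk[27:16] | pg[15:8] | lun[7:6] | ch[5:3] | pln[2] | sec[1:0]
 *
 * Sector, plane, channel and LUN occupy the lowest bits so that consecutive
 * logical sectors stripe across planes, channels and LUNs before advancing
 * to the next page. Channel and LUN counts must be powers of two for this
 * packing to hold, which pblk_set_ppaf() enforces.
 */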
static int pblk_set_ppaf(struct pblk *pblk)
{
        struct nvm_tgt_dev *dev = pblk->dev;
        struct nvm_geo *geo = &dev->geo;
        struct nvm_addr_format ppaf = geo->ppaf;
        int power_len;

        /* Re-calculate channel and lun format to adapt to configuration */
        power_len = get_count_order(geo->nr_chnls);
        if (1 << power_len != geo->nr_chnls) {
                pr_err("pblk: supports only power-of-two channel config.\n");
                return -EINVAL;
        }
        ppaf.ch_len = power_len;

        power_len = get_count_order(geo->luns_per_chnl);
        if (1 << power_len != geo->luns_per_chnl) {
                pr_err("pblk: supports only power-of-two LUN config.\n");
                return -EINVAL;
        }
        ppaf.lun_len = power_len;

        pblk->ppaf.sec_offset = 0;
        pblk->ppaf.pln_offset = ppaf.sect_len;
        pblk->ppaf.ch_offset = pblk->ppaf.pln_offset + ppaf.pln_len;
        pblk->ppaf.lun_offset = pblk->ppaf.ch_offset + ppaf.ch_len;
        pblk->ppaf.pg_offset = pblk->ppaf.lun_offset + ppaf.lun_len;
        pblk->ppaf.blk_offset = pblk->ppaf.pg_offset + ppaf.pg_len;
        pblk->ppaf.sec_mask = (1ULL << ppaf.sect_len) - 1;
        pblk->ppaf.pln_mask = ((1ULL << ppaf.pln_len) - 1) <<
                                                        pblk->ppaf.pln_offset;
        pblk->ppaf.ch_mask = ((1ULL << ppaf.ch_len) - 1) <<
                                                        pblk->ppaf.ch_offset;
        pblk->ppaf.lun_mask = ((1ULL << ppaf.lun_len) - 1) <<
                                                        pblk->ppaf.lun_offset;
        pblk->ppaf.pg_mask = ((1ULL << ppaf.pg_len) - 1) <<
                                                        pblk->ppaf.pg_offset;
        pblk->ppaf.blk_mask = ((1ULL << ppaf.blk_len) - 1) <<
                                                        pblk->ppaf.blk_offset;

        pblk->ppaf_bitsize = pblk->ppaf.blk_offset + ppaf.blk_len;

        return 0;
}

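/*
 * The slab caches below are module-wide (static at the top of this file);
 * pblk_lock serialises their creation and teardown.
 */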
static int pblk_init_global_caches(struct pblk *pblk)
{
        down_write(&pblk_lock);
        pblk_ws_cache = kmem_cache_create("pblk_blk_ws",
                                sizeof(struct pblk_line_ws), 0, 0, NULL);
        if (!pblk_ws_cache) {
                up_write(&pblk_lock);
                return -ENOMEM;
        }

        pblk_rec_cache = kmem_cache_create("pblk_rec",
                                sizeof(struct pblk_rec_ctx), 0, 0, NULL);
        if (!pblk_rec_cache) {
                kmem_cache_destroy(pblk_ws_cache);
                up_write(&pblk_lock);
                return -ENOMEM;
        }

        pblk_g_rq_cache = kmem_cache_create("pblk_g_rq", pblk_g_rq_size,
                                0, 0, NULL);
        if (!pblk_g_rq_cache) {
                kmem_cache_destroy(pblk_ws_cache);
                kmem_cache_destroy(pblk_rec_cache);
                up_write(&pblk_lock);
                return -ENOMEM;
        }

        pblk_w_rq_cache = kmem_cache_create("pblk_w_rq", pblk_w_rq_size,
                                0, 0, NULL);
        if (!pblk_w_rq_cache) {
                kmem_cache_destroy(pblk_ws_cache);
                kmem_cache_destroy(pblk_rec_cache);
                kmem_cache_destroy(pblk_g_rq_cache);
                up_write(&pblk_lock);
                return -ENOMEM;
        }
        up_write(&pblk_lock);

        return 0;
}

static void pblk_free_global_caches(struct pblk *pblk)
{
        kmem_cache_destroy(pblk_ws_cache);
        kmem_cache_destroy(pblk_rec_cache);
        kmem_cache_destroy(pblk_g_rq_cache);
        kmem_cache_destroy(pblk_w_rq_cache);
}

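/*
 * Rough sizing example (illustrative numbers, not from the original source):
 * pgs_in_buffer scales with sectors per page, planes and LUNs, so a geometry
 * with 4 sectors/page, 4 planes and 64 LUNs buffers
 * NVM_MEM_PAGE_WRITE * 4 * 4 * 64 sectors in the write cache; the mempools
 * and workqueues set up below back the data path built on top of that buffer.
 */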
static int pblk_core_init(struct pblk *pblk)
{
        struct nvm_tgt_dev *dev = pblk->dev;
        struct nvm_geo *geo = &dev->geo;

        pblk->pgs_in_buffer = NVM_MEM_PAGE_WRITE * geo->sec_per_pg *
                                                geo->nr_planes * geo->nr_luns;

        if (pblk_init_global_caches(pblk))
                return -ENOMEM;

        /* Internal bios can be at most the sectors signaled by the device. */
        pblk->page_bio_pool = mempool_create_page_pool(nvm_max_phys_sects(dev),
                                                                        0);
        if (!pblk->page_bio_pool)
                goto free_global_caches;

        pblk->gen_ws_pool = mempool_create_slab_pool(PBLK_GEN_WS_POOL_SIZE,
                                                        pblk_ws_cache);
        if (!pblk->gen_ws_pool)
                goto free_page_bio_pool;

        pblk->rec_pool = mempool_create_slab_pool(geo->nr_luns, pblk_rec_cache);
        if (!pblk->rec_pool)
                goto free_gen_ws_pool;

        pblk->r_rq_pool = mempool_create_slab_pool(geo->nr_luns,
                                                        pblk_g_rq_cache);
        if (!pblk->r_rq_pool)
                goto free_rec_pool;

        pblk->e_rq_pool = mempool_create_slab_pool(geo->nr_luns,
                                                        pblk_g_rq_cache);
        if (!pblk->e_rq_pool)
                goto free_r_rq_pool;

        pblk->w_rq_pool = mempool_create_slab_pool(geo->nr_luns,
                                                        pblk_w_rq_cache);
        if (!pblk->w_rq_pool)
                goto free_e_rq_pool;

        pblk->close_wq = alloc_workqueue("pblk-close-wq",
                        WQ_MEM_RECLAIM | WQ_UNBOUND, PBLK_NR_CLOSE_JOBS);
        if (!pblk->close_wq)
                goto free_w_rq_pool;

        pblk->bb_wq = alloc_workqueue("pblk-bb-wq",
                        WQ_MEM_RECLAIM | WQ_UNBOUND, 0);
        if (!pblk->bb_wq)
                goto free_close_wq;

        pblk->r_end_wq = alloc_workqueue("pblk-read-end-wq",
                        WQ_MEM_RECLAIM | WQ_UNBOUND, 0);
        if (!pblk->r_end_wq)
                goto free_bb_wq;

        if (pblk_set_ppaf(pblk))
                goto free_r_end_wq;

        if (pblk_rwb_init(pblk))
                goto free_r_end_wq;

        INIT_LIST_HEAD(&pblk->compl_list);
        return 0;

free_r_end_wq:
        destroy_workqueue(pblk->r_end_wq);
free_bb_wq:
        destroy_workqueue(pblk->bb_wq);
free_close_wq:
        destroy_workqueue(pblk->close_wq);
free_w_rq_pool:
        mempool_destroy(pblk->w_rq_pool);
free_e_rq_pool:
        mempool_destroy(pblk->e_rq_pool);
free_r_rq_pool:
        mempool_destroy(pblk->r_rq_pool);
free_rec_pool:
        mempool_destroy(pblk->rec_pool);
free_gen_ws_pool:
        mempool_destroy(pblk->gen_ws_pool);
free_page_bio_pool:
        mempool_destroy(pblk->page_bio_pool);
free_global_caches:
        pblk_free_global_caches(pblk);
        return -ENOMEM;
}

static void pblk_core_free(struct pblk *pblk)
{
        if (pblk->close_wq)
                destroy_workqueue(pblk->close_wq);

        if (pblk->r_end_wq)
                destroy_workqueue(pblk->r_end_wq);

        if (pblk->bb_wq)
                destroy_workqueue(pblk->bb_wq);

        mempool_destroy(pblk->page_bio_pool);
        mempool_destroy(pblk->gen_ws_pool);
        mempool_destroy(pblk->rec_pool);
        mempool_destroy(pblk->r_rq_pool);
        mempool_destroy(pblk->e_rq_pool);
        mempool_destroy(pblk->w_rq_pool);

        pblk_free_global_caches(pblk);
}

static void pblk_luns_free(struct pblk *pblk)
{
        kfree(pblk->luns);
}

static void pblk_free_line_bitmaps(struct pblk_line *line)
{
        kfree(line->blk_bitmap);
        kfree(line->erase_bitmap);
}

static void pblk_lines_free(struct pblk *pblk)
{
        struct pblk_line_mgmt *l_mg = &pblk->l_mg;
        struct pblk_line *line;
        int i;

        spin_lock(&l_mg->free_lock);
        for (i = 0; i < l_mg->nr_lines; i++) {
                line = &pblk->lines[i];

                pblk_line_free(pblk, line);
                pblk_free_line_bitmaps(line);
        }
        spin_unlock(&l_mg->free_lock);
}

static void pblk_line_meta_free(struct pblk *pblk)
{
        struct pblk_line_mgmt *l_mg = &pblk->l_mg;
        int i;

        kfree(l_mg->bb_template);
        kfree(l_mg->bb_aux);
        kfree(l_mg->vsc_list);

        spin_lock(&l_mg->free_lock);
        for (i = 0; i < PBLK_DATA_LINES; i++) {
                kfree(l_mg->sline_meta[i]);
                pblk_mfree(l_mg->eline_meta[i]->buf, l_mg->emeta_alloc_type);
                kfree(l_mg->eline_meta[i]);
        }
        spin_unlock(&l_mg->free_lock);

        kfree(pblk->lines);
}

static int pblk_bb_discovery(struct nvm_tgt_dev *dev, struct pblk_lun *rlun)
{
        struct nvm_geo *geo = &dev->geo;
        struct ppa_addr ppa;
        u8 *blks;
        int nr_blks, ret;

        nr_blks = geo->blks_per_lun * geo->plane_mode;
        blks = kmalloc(nr_blks, GFP_KERNEL);
        if (!blks)
                return -ENOMEM;

        ppa.ppa = 0;
        ppa.g.ch = rlun->bppa.g.ch;
        ppa.g.lun = rlun->bppa.g.lun;

        ret = nvm_get_tgt_bb_tbl(dev, ppa, blks);
        if (ret)
                goto out;

        nr_blks = nvm_bb_tbl_fold(dev->parent, blks, nr_blks);
        if (nr_blks < 0) {
                ret = nr_blks;
                goto out;
        }

        rlun->bb_list = blks;

        return 0;
out:
        kfree(blks);
        return ret;
}

static int pblk_bb_line(struct pblk *pblk, struct pblk_line *line,
                        int blk_per_line)
{
        struct nvm_tgt_dev *dev = pblk->dev;
        struct nvm_geo *geo = &dev->geo;
        struct pblk_lun *rlun;
        int bb_cnt = 0;
        int i;

        for (i = 0; i < blk_per_line; i++) {
                rlun = &pblk->luns[i];
                if (rlun->bb_list[line->id] == NVM_BLK_T_FREE)
                        continue;

                set_bit(pblk_ppa_to_pos(geo, rlun->bppa), line->blk_bitmap);
                bb_cnt++;
        }

        return bb_cnt;
}

static int pblk_alloc_line_bitmaps(struct pblk *pblk, struct pblk_line *line)
{
        struct pblk_line_meta *lm = &pblk->lm;

        line->blk_bitmap = kzalloc(lm->blk_bitmap_len, GFP_KERNEL);
        if (!line->blk_bitmap)
                return -ENOMEM;

        line->erase_bitmap = kzalloc(lm->blk_bitmap_len, GFP_KERNEL);
        if (!line->erase_bitmap) {
                kfree(line->blk_bitmap);
                return -ENOMEM;
        }

        return 0;
}

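/*
 * Mapping example (illustrative, assuming 2 channels with 4 LUNs each, not
 * part of the original source): the striping loop below orders pblk->luns[]
 * so that consecutive internal indices alternate channels, i.e.
 * i=0 -> ch0/lun0, i=1 -> ch1/lun0, i=2 -> ch0/lun1, and so on, while the
 * device-provided luns[] array lists all LUNs of one channel contiguously.
 */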
static int pblk_luns_init(struct pblk *pblk, struct ppa_addr *luns)
{
        struct nvm_tgt_dev *dev = pblk->dev;
        struct nvm_geo *geo = &dev->geo;
        struct pblk_lun *rlun;
        int i, ret;

        /* TODO: Implement unbalanced LUN support */
        if (geo->luns_per_chnl < 0) {
                pr_err("pblk: unbalanced LUN config.\n");
                return -EINVAL;
        }

        pblk->luns = kcalloc(geo->nr_luns, sizeof(struct pblk_lun), GFP_KERNEL);
        if (!pblk->luns)
                return -ENOMEM;

        for (i = 0; i < geo->nr_luns; i++) {
                /* Stripe across channels */
                int ch = i % geo->nr_chnls;
                int lun_raw = i / geo->nr_chnls;
                int lunid = lun_raw + ch * geo->luns_per_chnl;

                rlun = &pblk->luns[i];
                rlun->bppa = luns[lunid];

                sema_init(&rlun->wr_sem, 1);

                ret = pblk_bb_discovery(dev, rlun);
                if (ret) {
                        while (--i >= 0)
                                kfree(pblk->luns[i].bb_list);
                        return ret;
                }
        }

        return 0;
}

static int pblk_lines_configure(struct pblk *pblk, int flags)
{
        struct pblk_line *line = NULL;
        int ret = 0;

        if (!(flags & NVM_TARGET_FACTORY)) {
                line = pblk_recov_l2p(pblk);
                if (IS_ERR(line)) {
                        pr_err("pblk: could not recover l2p table\n");
                        ret = -EFAULT;
                }
        }

        if (!line) {
                /* Configure next line for user data */
                line = pblk_line_get_first_data(pblk);
                if (!line) {
                        pr_err("pblk: line list corrupted\n");
                        ret = -EFAULT;
                }
        }

        return ret;
}

/* See comment over struct line_emeta definition */
static unsigned int calc_emeta_len(struct pblk *pblk)
{
        struct pblk_line_meta *lm = &pblk->lm;
        struct pblk_line_mgmt *l_mg = &pblk->l_mg;
        struct nvm_tgt_dev *dev = pblk->dev;
        struct nvm_geo *geo = &dev->geo;

        /* Round to sector size so that lba_list starts on its own sector */
        lm->emeta_sec[1] = DIV_ROUND_UP(
                        sizeof(struct line_emeta) + lm->blk_bitmap_len,
                        geo->sec_size);
        lm->emeta_len[1] = lm->emeta_sec[1] * geo->sec_size;

        /* Round to sector size so that vsc_list starts on its own sector */
        lm->dsec_per_line = lm->sec_per_line - lm->emeta_sec[0];
        lm->emeta_sec[2] = DIV_ROUND_UP(lm->dsec_per_line * sizeof(u64),
                        geo->sec_size);
        lm->emeta_len[2] = lm->emeta_sec[2] * geo->sec_size;

        lm->emeta_sec[3] = DIV_ROUND_UP(l_mg->nr_lines * sizeof(u32),
                        geo->sec_size);
        lm->emeta_len[3] = lm->emeta_sec[3] * geo->sec_size;

        lm->vsc_list_len = l_mg->nr_lines * sizeof(u32);

        return (lm->emeta_len[1] + lm->emeta_len[2] + lm->emeta_len[3]);
}

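/*
 * Worked example (illustrative, not from the original source): with the fixed
 * 20% over-provisioning below and 1024 free blocks, provisioned =
 * 1024 * 80 / 100 = 819 blocks, so user capacity becomes
 * 819 * geo->sec_per_blk sectors while the rate limiter still tracks all
 * 1024 blocks internally.
 */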
static void pblk_set_provision(struct pblk *pblk, long nr_free_blks)
{
        struct nvm_tgt_dev *dev = pblk->dev;
        struct nvm_geo *geo = &dev->geo;
        sector_t provisioned;

        pblk->over_pct = 20;

        provisioned = nr_free_blks;
        provisioned *= (100 - pblk->over_pct);
        sector_div(provisioned, 100);

        /* Internally pblk manages all free blocks, but all calculations based
         * on user capacity consider only provisioned blocks
         */
        pblk->rl.total_blocks = nr_free_blks;
        pblk->rl.nr_secs = nr_free_blks * geo->sec_per_blk;
        pblk->capacity = provisioned * geo->sec_per_blk;
        atomic_set(&pblk->rl.free_blocks, nr_free_blks);
}

static int pblk_lines_alloc_metadata(struct pblk *pblk)
{
        struct pblk_line_mgmt *l_mg = &pblk->l_mg;
        struct pblk_line_meta *lm = &pblk->lm;
        int i;

        /* smeta is always small enough to fit on a kmalloc memory allocation,
         * emeta depends on the number of LUNs allocated to the pblk instance
         */
        for (i = 0; i < PBLK_DATA_LINES; i++) {
                l_mg->sline_meta[i] = kmalloc(lm->smeta_len, GFP_KERNEL);
                if (!l_mg->sline_meta[i])
                        goto fail_free_smeta;
        }

        /* emeta allocates three different buffers for managing metadata with
         * in-memory and in-media layouts
         */
        for (i = 0; i < PBLK_DATA_LINES; i++) {
                struct pblk_emeta *emeta;

                emeta = kmalloc(sizeof(struct pblk_emeta), GFP_KERNEL);
                if (!emeta)
                        goto fail_free_emeta;

                if (lm->emeta_len[0] > KMALLOC_MAX_CACHE_SIZE) {
                        l_mg->emeta_alloc_type = PBLK_VMALLOC_META;

                        emeta->buf = vmalloc(lm->emeta_len[0]);
                        if (!emeta->buf) {
                                kfree(emeta);
                                goto fail_free_emeta;
                        }

                        emeta->nr_entries = lm->emeta_sec[0];
                        l_mg->eline_meta[i] = emeta;
                } else {
                        l_mg->emeta_alloc_type = PBLK_KMALLOC_META;

                        emeta->buf = kmalloc(lm->emeta_len[0], GFP_KERNEL);
                        if (!emeta->buf) {
                                kfree(emeta);
                                goto fail_free_emeta;
                        }

                        emeta->nr_entries = lm->emeta_sec[0];
                        l_mg->eline_meta[i] = emeta;
                }
        }

        l_mg->vsc_list = kcalloc(l_mg->nr_lines, sizeof(__le32), GFP_KERNEL);
        if (!l_mg->vsc_list)
                goto fail_free_emeta;

        for (i = 0; i < l_mg->nr_lines; i++)
                l_mg->vsc_list[i] = cpu_to_le32(EMPTY_ENTRY);

        return 0;

fail_free_emeta:
        while (--i >= 0) {
                if (l_mg->emeta_alloc_type == PBLK_VMALLOC_META)
                        vfree(l_mg->eline_meta[i]->buf);
                else
                        kfree(l_mg->eline_meta[i]->buf);
                kfree(l_mg->eline_meta[i]);
        }

fail_free_smeta:
        for (i = 0; i < PBLK_DATA_LINES; i++)
                kfree(l_mg->sline_meta[i]);

        return -ENOMEM;
}

static int pblk_lines_init(struct pblk *pblk)
{
        struct nvm_tgt_dev *dev = pblk->dev;
        struct nvm_geo *geo = &dev->geo;
        struct pblk_line_mgmt *l_mg = &pblk->l_mg;
        struct pblk_line_meta *lm = &pblk->lm;
        struct pblk_line *line;
        unsigned int smeta_len, emeta_len;
        long nr_bad_blks, nr_free_blks;
        int bb_distance, max_write_ppas, mod;
        int i, ret;

        pblk->min_write_pgs = geo->sec_per_pl * (geo->sec_size / PAGE_SIZE);
        max_write_ppas = pblk->min_write_pgs * geo->nr_luns;
        pblk->max_write_pgs = (max_write_ppas < nvm_max_phys_sects(dev)) ?
                                max_write_ppas : nvm_max_phys_sects(dev);
        pblk_set_sec_per_write(pblk, pblk->min_write_pgs);

        if (pblk->max_write_pgs > PBLK_MAX_REQ_ADDRS) {
                pr_err("pblk: cannot support device max_phys_sect\n");
                return -EINVAL;
        }

        div_u64_rem(geo->sec_per_blk, pblk->min_write_pgs, &mod);
        if (mod) {
                pr_err("pblk: bad configuration of sectors/pages\n");
                return -EINVAL;
        }

        l_mg->nr_lines = geo->blks_per_lun;
        l_mg->log_line = l_mg->data_line = NULL;
        l_mg->l_seq_nr = l_mg->d_seq_nr = 0;
        l_mg->nr_free_lines = 0;
        bitmap_zero(&l_mg->meta_bitmap, PBLK_DATA_LINES);

        lm->sec_per_line = geo->sec_per_blk * geo->nr_luns;
        lm->blk_per_line = geo->nr_luns;
        lm->blk_bitmap_len = BITS_TO_LONGS(geo->nr_luns) * sizeof(long);
        lm->sec_bitmap_len = BITS_TO_LONGS(lm->sec_per_line) * sizeof(long);
        lm->lun_bitmap_len = BITS_TO_LONGS(geo->nr_luns) * sizeof(long);
        lm->mid_thrs = lm->sec_per_line / 2;
        lm->high_thrs = lm->sec_per_line / 4;
        lm->meta_distance = (geo->nr_luns / 2) * pblk->min_write_pgs;

        /* Calculate necessary pages for smeta. See comment over struct
         * line_smeta definition
         */
        i = 1;
add_smeta_page:
        lm->smeta_sec = i * geo->sec_per_pl;
        lm->smeta_len = lm->smeta_sec * geo->sec_size;

        smeta_len = sizeof(struct line_smeta) + lm->lun_bitmap_len;
        if (smeta_len > lm->smeta_len) {
                i++;
                goto add_smeta_page;
        }

        /* Calculate necessary pages for emeta. See comment over struct
         * line_emeta definition
         */
        i = 1;
add_emeta_page:
        lm->emeta_sec[0] = i * geo->sec_per_pl;
        lm->emeta_len[0] = lm->emeta_sec[0] * geo->sec_size;

        emeta_len = calc_emeta_len(pblk);
        if (emeta_len > lm->emeta_len[0]) {
                i++;
                goto add_emeta_page;
        }

        lm->emeta_bb = geo->nr_luns > i ? geo->nr_luns - i : 0;

        lm->min_blk_line = 1;
        if (geo->nr_luns > 1)
                lm->min_blk_line += DIV_ROUND_UP(lm->smeta_sec +
                                        lm->emeta_sec[0], geo->sec_per_blk);

        if (lm->min_blk_line > lm->blk_per_line) {
                pr_err("pblk: config. not supported. Min. LUN in line:%d\n",
                                                        lm->blk_per_line);
                ret = -EINVAL;
                goto fail;
        }

        ret = pblk_lines_alloc_metadata(pblk);
        if (ret)
                goto fail;

        l_mg->bb_template = kzalloc(lm->sec_bitmap_len, GFP_KERNEL);
        if (!l_mg->bb_template) {
                ret = -ENOMEM;
                goto fail_free_meta;
        }

        l_mg->bb_aux = kzalloc(lm->sec_bitmap_len, GFP_KERNEL);
        if (!l_mg->bb_aux) {
                ret = -ENOMEM;
                goto fail_free_bb_template;
        }

        bb_distance = (geo->nr_luns) * geo->sec_per_pl;
        for (i = 0; i < lm->sec_per_line; i += bb_distance)
                bitmap_set(l_mg->bb_template, i, geo->sec_per_pl);

        INIT_LIST_HEAD(&l_mg->free_list);
        INIT_LIST_HEAD(&l_mg->corrupt_list);
        INIT_LIST_HEAD(&l_mg->bad_list);
        INIT_LIST_HEAD(&l_mg->gc_full_list);
        INIT_LIST_HEAD(&l_mg->gc_high_list);
        INIT_LIST_HEAD(&l_mg->gc_mid_list);
        INIT_LIST_HEAD(&l_mg->gc_low_list);
        INIT_LIST_HEAD(&l_mg->gc_empty_list);

        INIT_LIST_HEAD(&l_mg->emeta_list);

        l_mg->gc_lists[0] = &l_mg->gc_high_list;
        l_mg->gc_lists[1] = &l_mg->gc_mid_list;
        l_mg->gc_lists[2] = &l_mg->gc_low_list;

        spin_lock_init(&l_mg->free_lock);
        spin_lock_init(&l_mg->close_lock);
        spin_lock_init(&l_mg->gc_lock);

        pblk->lines = kcalloc(l_mg->nr_lines, sizeof(struct pblk_line),
                                                                GFP_KERNEL);
        if (!pblk->lines) {
                ret = -ENOMEM;
                goto fail_free_bb_aux;
        }

        nr_free_blks = 0;
        for (i = 0; i < l_mg->nr_lines; i++) {
                int blk_in_line;

                line = &pblk->lines[i];

                line->pblk = pblk;
                line->id = i;
                line->type = PBLK_LINETYPE_FREE;
                line->state = PBLK_LINESTATE_FREE;
                line->gc_group = PBLK_LINEGC_NONE;
                line->vsc = &l_mg->vsc_list[i];
                spin_lock_init(&line->lock);

                ret = pblk_alloc_line_bitmaps(pblk, line);
                if (ret)
                        goto fail_free_lines;

                nr_bad_blks = pblk_bb_line(pblk, line, lm->blk_per_line);
                if (nr_bad_blks < 0 || nr_bad_blks > lm->blk_per_line) {
                        pblk_free_line_bitmaps(line);
                        ret = -EINVAL;
                        goto fail_free_lines;
                }

                blk_in_line = lm->blk_per_line - nr_bad_blks;
                if (blk_in_line < lm->min_blk_line) {
                        line->state = PBLK_LINESTATE_BAD;
                        list_add_tail(&line->list, &l_mg->bad_list);
                        continue;
                }

                nr_free_blks += blk_in_line;
                atomic_set(&line->blk_in_line, blk_in_line);

                l_mg->nr_free_lines++;
                list_add_tail(&line->list, &l_mg->free_list);
        }

        pblk_set_provision(pblk, nr_free_blks);

        /* Cleanup per-LUN bad block lists - managed within lines on run-time */
        for (i = 0; i < geo->nr_luns; i++)
                kfree(pblk->luns[i].bb_list);

        return 0;
fail_free_lines:
        while (--i >= 0)
                pblk_free_line_bitmaps(&pblk->lines[i]);
fail_free_bb_aux:
        kfree(l_mg->bb_aux);
fail_free_bb_template:
        kfree(l_mg->bb_template);
fail_free_meta:
        pblk_line_meta_free(pblk);
fail:
        for (i = 0; i < geo->nr_luns; i++)
                kfree(pblk->luns[i].bb_list);

        return ret;
}

static int pblk_writer_init(struct pblk *pblk)
{
        setup_timer(&pblk->wtimer, pblk_write_timer_fn, (unsigned long)pblk);
        mod_timer(&pblk->wtimer, jiffies + msecs_to_jiffies(100));

        pblk->writer_ts = kthread_create(pblk_write_ts, pblk, "pblk-writer-t");
        if (IS_ERR(pblk->writer_ts)) {
                pr_err("pblk: could not allocate writer kthread\n");
                return PTR_ERR(pblk->writer_ts);
        }

        return 0;
}

static void pblk_writer_stop(struct pblk *pblk)
{
        /* The pipeline must be stopped and the write buffer emptied before the
         * write thread is stopped
         */
        WARN(pblk_rb_read_count(&pblk->rwb),
                        "Stopping not fully persisted write buffer\n");

        WARN(pblk_rb_sync_count(&pblk->rwb),
                        "Stopping not fully synced write buffer\n");

        if (pblk->writer_ts)
                kthread_stop(pblk->writer_ts);
        del_timer(&pblk->wtimer);
}

static void pblk_free(struct pblk *pblk)
{
        pblk_luns_free(pblk);
        pblk_lines_free(pblk);
        pblk_line_meta_free(pblk);
        pblk_core_free(pblk);
        pblk_l2p_free(pblk);

        kfree(pblk);
}

static void pblk_tear_down(struct pblk *pblk)
{
        pblk_pipeline_stop(pblk);
        pblk_writer_stop(pblk);
        pblk_rb_sync_l2p(&pblk->rwb);
        pblk_rwb_free(pblk);
        pblk_rl_free(&pblk->rl);

        pr_debug("pblk: consistent tear down\n");
}

static void pblk_exit(void *private)
{
        struct pblk *pblk = private;

        down_write(&pblk_lock);
        pblk_gc_exit(pblk);
        pblk_tear_down(pblk);
        pblk_free(pblk);
        up_write(&pblk_lock);
}

static sector_t pblk_capacity(void *private)
{
        struct pblk *pblk = private;

        return pblk->capacity * NR_PHY_IN_LOG;
}

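/*
 * Initialization order (summary of the code below, added for readability):
 * LUNs -> lines -> core (caches, mempools, workqueues, write buffer) -> L2P
 * table -> line configuration/recovery -> writer thread -> GC. The error
 * labels at the bottom of pblk_init() unwind the completed steps by falling
 * through in the reverse order.
 */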
static void *pblk_init(struct nvm_tgt_dev *dev, struct gendisk *tdisk,
                       int flags)
{
        struct nvm_geo *geo = &dev->geo;
        struct request_queue *bqueue = dev->q;
        struct request_queue *tqueue = tdisk->queue;
        struct pblk *pblk;
        int ret;

        if (dev->identity.dom & NVM_RSP_L2P) {
                pr_err("pblk: host-side L2P table not supported. (%x)\n",
                                                        dev->identity.dom);
                return ERR_PTR(-EINVAL);
        }

        pblk = kzalloc(sizeof(struct pblk), GFP_KERNEL);
        if (!pblk)
                return ERR_PTR(-ENOMEM);

        pblk->dev = dev;
        pblk->disk = tdisk;
        pblk->state = PBLK_STATE_RUNNING;
        pblk->gc.gc_enabled = 0;

        spin_lock_init(&pblk->trans_lock);
        spin_lock_init(&pblk->lock);

        if (flags & NVM_TARGET_FACTORY)
                pblk_setup_uuid(pblk);

#ifdef CONFIG_NVM_DEBUG
        atomic_long_set(&pblk->inflight_writes, 0);
        atomic_long_set(&pblk->padded_writes, 0);
        atomic_long_set(&pblk->padded_wb, 0);
        atomic_long_set(&pblk->nr_flush, 0);
        atomic_long_set(&pblk->req_writes, 0);
        atomic_long_set(&pblk->sub_writes, 0);
        atomic_long_set(&pblk->sync_writes, 0);
        atomic_long_set(&pblk->inflight_reads, 0);
        atomic_long_set(&pblk->cache_reads, 0);
        atomic_long_set(&pblk->sync_reads, 0);
        atomic_long_set(&pblk->recov_writes, 0);
        atomic_long_set(&pblk->recov_gc_writes, 0);
        atomic_long_set(&pblk->recov_gc_reads, 0);
#endif

        atomic_long_set(&pblk->read_failed, 0);
        atomic_long_set(&pblk->read_empty, 0);
        atomic_long_set(&pblk->read_high_ecc, 0);
        atomic_long_set(&pblk->read_failed_gc, 0);
        atomic_long_set(&pblk->write_failed, 0);
        atomic_long_set(&pblk->erase_failed, 0);

        ret = pblk_luns_init(pblk, dev->luns);
        if (ret) {
                pr_err("pblk: could not initialize luns\n");
                goto fail;
        }

        ret = pblk_lines_init(pblk);
        if (ret) {
                pr_err("pblk: could not initialize lines\n");
                goto fail_free_luns;
        }

        ret = pblk_core_init(pblk);
        if (ret) {
                pr_err("pblk: could not initialize core\n");
                goto fail_free_line_meta;
        }

        ret = pblk_l2p_init(pblk);
        if (ret) {
                pr_err("pblk: could not initialize maps\n");
                goto fail_free_core;
        }

        ret = pblk_lines_configure(pblk, flags);
        if (ret) {
                pr_err("pblk: could not configure lines\n");
                goto fail_free_l2p;
        }

        ret = pblk_writer_init(pblk);
        if (ret) {
                pr_err("pblk: could not initialize write thread\n");
                goto fail_free_lines;
        }

        ret = pblk_gc_init(pblk);
        if (ret) {
                pr_err("pblk: could not initialize gc\n");
                goto fail_stop_writer;
        }

        /* inherit the size from the underlying device */
        blk_queue_logical_block_size(tqueue, queue_physical_block_size(bqueue));
        blk_queue_max_hw_sectors(tqueue, queue_max_hw_sectors(bqueue));

        blk_queue_write_cache(tqueue, true, false);

        tqueue->limits.discard_granularity = geo->pgs_per_blk * geo->pfpg_size;
        tqueue->limits.discard_alignment = 0;
        blk_queue_max_discard_sectors(tqueue, UINT_MAX >> 9);
        queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, tqueue);

        pr_info("pblk init: luns:%u, lines:%d, secs:%llu, buf entries:%u\n",
                        geo->nr_luns, pblk->l_mg.nr_lines,
                        (unsigned long long)pblk->rl.nr_secs,
                        pblk->rwb.nr_entries);

        wake_up_process(pblk->writer_ts);
        return pblk;

fail_stop_writer:
        pblk_writer_stop(pblk);
fail_free_lines:
        pblk_lines_free(pblk);
fail_free_l2p:
        pblk_l2p_free(pblk);
fail_free_core:
        pblk_core_free(pblk);
fail_free_line_meta:
        pblk_line_meta_free(pblk);
fail_free_luns:
        pblk_luns_free(pblk);
fail:
        kfree(pblk);
        return ERR_PTR(ret);
}

/* physical block device target */
static struct nvm_tgt_type tt_pblk = {
        .name           = "pblk",
        .version        = {1, 0, 0},

        .make_rq        = pblk_make_rq,
        .capacity       = pblk_capacity,

        .init           = pblk_init,
        .exit           = pblk_exit,

        .sysfs_init     = pblk_sysfs_init,
        .sysfs_exit     = pblk_sysfs_exit,
        .owner          = THIS_MODULE,
};

static int __init pblk_module_init(void)
{
        int ret;

        pblk_bio_set = bioset_create(BIO_POOL_SIZE, 0, 0);
        if (!pblk_bio_set)
                return -ENOMEM;
        ret = nvm_register_tgt_type(&tt_pblk);
        if (ret)
                bioset_free(pblk_bio_set);
        return ret;
}

static void pblk_module_exit(void)
{
        bioset_free(pblk_bio_set);
        nvm_unregister_tgt_type(&tt_pblk);
}

module_init(pblk_module_init);
module_exit(pblk_module_exit);
MODULE_AUTHOR("Javier Gonzalez <javier@cnexlabs.com>");
MODULE_AUTHOR("Matias Bjorling <matias@cnexlabs.com>");
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Physical Block-Device for Open-Channel SSDs");