/*
 * Copyright (C) 2016 CNEX Labs
 * Initial release: Javier Gonzalez <javier@cnexlabs.com>
 *                  Matias Bjorling <matias@cnexlabs.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * pblk-core.c - pblk's core functionality
 *
 */

#include "pblk.h"

static void pblk_line_mark_bb(struct work_struct *work)
{
        struct pblk_line_ws *line_ws = container_of(work, struct pblk_line_ws,
                                                                        ws);
        struct pblk *pblk = line_ws->pblk;
        struct nvm_tgt_dev *dev = pblk->dev;
        struct ppa_addr *ppa = line_ws->priv;
        int ret;

        ret = nvm_set_tgt_bb_tbl(dev, ppa, 1, NVM_BLK_T_GRWN_BAD);
        if (ret) {
                struct pblk_line *line;
                int pos;

                line = &pblk->lines[pblk_dev_ppa_to_line(*ppa)];
                pos = pblk_dev_ppa_to_pos(&dev->geo, *ppa);

                pr_err("pblk: failed to mark bb, line:%d, pos:%d\n",
                                line->id, pos);
        }

        kfree(ppa);
        mempool_free(line_ws, pblk->gen_ws_pool);
}

static void pblk_mark_bb(struct pblk *pblk, struct pblk_line *line,
                         struct ppa_addr *ppa)
{
        struct nvm_tgt_dev *dev = pblk->dev;
        struct nvm_geo *geo = &dev->geo;
        int pos = pblk_dev_ppa_to_pos(geo, *ppa);

        pr_debug("pblk: erase failed: line:%d, pos:%d\n", line->id, pos);
        atomic_long_inc(&pblk->erase_failed);

        atomic_dec(&line->blk_in_line);
        if (test_and_set_bit(pos, line->blk_bitmap))
                pr_err("pblk: attempted to erase bb: line:%d, pos:%d\n",
                                                        line->id, pos);

        pblk_gen_run_ws(pblk, NULL, ppa, pblk_line_mark_bb,
                                                GFP_ATOMIC, pblk->bb_wq);
}

static void __pblk_end_io_erase(struct pblk *pblk, struct nvm_rq *rqd)
{
        struct pblk_line *line;

        line = &pblk->lines[pblk_dev_ppa_to_line(rqd->ppa_addr)];
        atomic_dec(&line->left_seblks);

        if (rqd->error) {
                struct ppa_addr *ppa;

                ppa = kmalloc(sizeof(struct ppa_addr), GFP_ATOMIC);
                if (!ppa)
                        return;

                *ppa = rqd->ppa_addr;
                pblk_mark_bb(pblk, line, ppa);
        }

        atomic_dec(&pblk->inflight_io);
}

/* Erase completion assumes that only one block is erased at a time */
static void pblk_end_io_erase(struct nvm_rq *rqd)
{
        struct pblk *pblk = rqd->private;

        __pblk_end_io_erase(pblk, rqd);
        mempool_free(rqd, pblk->e_rq_pool);
}

void __pblk_map_invalidate(struct pblk *pblk, struct pblk_line *line,
                           u64 paddr)
{
        struct pblk_line_mgmt *l_mg = &pblk->l_mg;
        struct list_head *move_list = NULL;

        /* Lines being reclaimed (GC'ed) cannot be invalidated. Before the L2P
         * table is modified with reclaimed sectors, a check is done to ensure
         * that newer updates are not overwritten.
         */
        spin_lock(&line->lock);
        WARN_ON(line->state == PBLK_LINESTATE_FREE);

        if (test_and_set_bit(paddr, line->invalid_bitmap)) {
                WARN_ONCE(1, "pblk: double invalidate\n");
                spin_unlock(&line->lock);
                return;
        }
        le32_add_cpu(line->vsc, -1);

        if (line->state == PBLK_LINESTATE_CLOSED)
                move_list = pblk_line_gc_list(pblk, line);
        spin_unlock(&line->lock);

        if (move_list) {
                spin_lock(&l_mg->gc_lock);
                spin_lock(&line->lock);
                /* Prevent moving a line that has just been chosen for GC */
                if (line->state == PBLK_LINESTATE_GC) {
                        spin_unlock(&line->lock);
                        spin_unlock(&l_mg->gc_lock);
                        return;
                }
                spin_unlock(&line->lock);

                list_move_tail(&line->list, move_list);
                spin_unlock(&l_mg->gc_lock);
        }
}

void pblk_map_invalidate(struct pblk *pblk, struct ppa_addr ppa)
{
        struct pblk_line *line;
        u64 paddr;
        int line_id;

#ifdef CONFIG_NVM_DEBUG
        /* Callers must ensure that the ppa points to a device address */
        BUG_ON(pblk_addr_in_cache(ppa));
        BUG_ON(pblk_ppa_empty(ppa));
#endif

        line_id = pblk_tgt_ppa_to_line(ppa);
        line = &pblk->lines[line_id];
        paddr = pblk_dev_ppa_to_line_addr(pblk, ppa);

        __pblk_map_invalidate(pblk, line, paddr);
}

static void pblk_invalidate_range(struct pblk *pblk, sector_t slba,
                                  unsigned int nr_secs)
{
        sector_t lba;

        spin_lock(&pblk->trans_lock);
        for (lba = slba; lba < slba + nr_secs; lba++) {
                struct ppa_addr ppa;

                ppa = pblk_trans_map_get(pblk, lba);

                if (!pblk_addr_in_cache(ppa) && !pblk_ppa_empty(ppa))
                        pblk_map_invalidate(pblk, ppa);

                pblk_ppa_set_empty(&ppa);
                pblk_trans_map_set(pblk, lba, ppa);
        }
        spin_unlock(&pblk->trans_lock);
}

/* Caller must guarantee that the request is a valid type */
struct nvm_rq *pblk_alloc_rqd(struct pblk *pblk, int type)
{
        mempool_t *pool;
        struct nvm_rq *rqd;
        int rq_size;

        switch (type) {
        case PBLK_WRITE:
        case PBLK_WRITE_INT:
                pool = pblk->w_rq_pool;
                rq_size = pblk_w_rq_size;
                break;
        case PBLK_READ:
                pool = pblk->r_rq_pool;
                rq_size = pblk_g_rq_size;
                break;
        default:
                pool = pblk->e_rq_pool;
                rq_size = pblk_g_rq_size;
        }

        rqd = mempool_alloc(pool, GFP_KERNEL);
        memset(rqd, 0, rq_size);

        return rqd;
}

/* Typically used on completion path. Cannot guarantee request consistency */
void pblk_free_rqd(struct pblk *pblk, struct nvm_rq *rqd, int type)
{
        struct nvm_tgt_dev *dev = pblk->dev;
        mempool_t *pool;

        switch (type) {
        case PBLK_WRITE:
                kfree(((struct pblk_c_ctx *)nvm_rq_to_pdu(rqd))->lun_bitmap);
                /* fall through: PBLK_WRITE also uses the write rq pool */
        case PBLK_WRITE_INT:
                pool = pblk->w_rq_pool;
                break;
        case PBLK_READ:
                pool = pblk->r_rq_pool;
                break;
        case PBLK_ERASE:
                pool = pblk->e_rq_pool;
                break;
        default:
                pr_err("pblk: trying to free unknown rqd type\n");
                return;
        }

        nvm_dev_dma_free(dev->parent, rqd->meta_list, rqd->dma_meta_list);
        mempool_free(rqd, pool);
}
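
/*
 * Example (illustrative only, not part of the driver): pairing
 * pblk_alloc_rqd() with pblk_free_rqd() for a read request. Note that
 * pblk_free_rqd() unconditionally frees the DMA metadata region, so the
 * caller is expected to have set up meta_list/dma_meta_list:
 *
 *	struct nvm_rq *rqd = pblk_alloc_rqd(pblk, PBLK_READ);
 *
 *	rqd->meta_list = nvm_dev_dma_alloc(dev->parent, GFP_KERNEL,
 *					   &rqd->dma_meta_list);
 *	...
 *	pblk_free_rqd(pblk, rqd, PBLK_READ);
 */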

void pblk_bio_free_pages(struct pblk *pblk, struct bio *bio, int off,
                         int nr_pages)
{
        struct bio_vec bv;
        int i;

        WARN_ON(off + nr_pages != bio->bi_vcnt);

        for (i = off; i < nr_pages + off; i++) {
                bv = bio->bi_io_vec[i];
                mempool_free(bv.bv_page, pblk->page_bio_pool);
        }
}

int pblk_bio_add_pages(struct pblk *pblk, struct bio *bio, gfp_t flags,
                       int nr_pages)
{
        struct request_queue *q = pblk->dev->q;
        struct page *page;
        int i, ret;

        for (i = 0; i < nr_pages; i++) {
                page = mempool_alloc(pblk->page_bio_pool, flags);

                ret = bio_add_pc_page(q, bio, page, PBLK_EXPOSED_PAGE_SIZE, 0);
                if (ret != PBLK_EXPOSED_PAGE_SIZE) {
                        pr_err("pblk: could not add page to bio\n");
                        mempool_free(page, pblk->page_bio_pool);
                        goto err;
                }
        }

        return 0;
err:
        /* Only the i pages added so far are attached to the bio */
        pblk_bio_free_pages(pblk, bio, 0, i);
        return -1;
}

static void pblk_write_kick(struct pblk *pblk)
{
        wake_up_process(pblk->writer_ts);
        mod_timer(&pblk->wtimer, jiffies + msecs_to_jiffies(1000));
}

void pblk_write_timer_fn(struct timer_list *t)
{
        struct pblk *pblk = from_timer(pblk, t, wtimer);

        /* kick the write thread every tick to flush outstanding data */
        pblk_write_kick(pblk);
}

void pblk_write_should_kick(struct pblk *pblk)
{
        unsigned int secs_avail = pblk_rb_read_count(&pblk->rwb);

        if (secs_avail >= pblk->min_write_pgs)
                pblk_write_kick(pblk);
}

void pblk_end_io_sync(struct nvm_rq *rqd)
{
        struct completion *waiting = rqd->private;

        complete(waiting);
}
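
/*
 * Usage sketch (hypothetical caller): pblk_end_io_sync() turns an
 * asynchronous request into a synchronous one via an on-stack completion:
 *
 *	DECLARE_COMPLETION_ONSTACK(wait);
 *
 *	rqd->end_io = pblk_end_io_sync;
 *	rqd->private = &wait;
 *	if (!pblk_submit_io(pblk, rqd))
 *		wait_for_completion_io(&wait);
 */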

static void pblk_wait_for_meta(struct pblk *pblk)
{
        do {
                if (!atomic_read(&pblk->inflight_io))
                        break;

                schedule();
        } while (1);
}

static void pblk_flush_writer(struct pblk *pblk)
{
        pblk_rb_flush(&pblk->rwb);
        do {
                if (!pblk_rb_sync_count(&pblk->rwb))
                        break;

                pblk_write_kick(pblk);
                schedule();
        } while (1);
}

struct list_head *pblk_line_gc_list(struct pblk *pblk, struct pblk_line *line)
{
        struct pblk_line_meta *lm = &pblk->lm;
        struct pblk_line_mgmt *l_mg = &pblk->l_mg;
        struct list_head *move_list = NULL;
        int vsc = le32_to_cpu(*line->vsc);

        lockdep_assert_held(&line->lock);

        if (!vsc) {
                if (line->gc_group != PBLK_LINEGC_FULL) {
                        line->gc_group = PBLK_LINEGC_FULL;
                        move_list = &l_mg->gc_full_list;
                }
        } else if (vsc < lm->high_thrs) {
                if (line->gc_group != PBLK_LINEGC_HIGH) {
                        line->gc_group = PBLK_LINEGC_HIGH;
                        move_list = &l_mg->gc_high_list;
                }
        } else if (vsc < lm->mid_thrs) {
                if (line->gc_group != PBLK_LINEGC_MID) {
                        line->gc_group = PBLK_LINEGC_MID;
                        move_list = &l_mg->gc_mid_list;
                }
        } else if (vsc < line->sec_in_line) {
                if (line->gc_group != PBLK_LINEGC_LOW) {
                        line->gc_group = PBLK_LINEGC_LOW;
                        move_list = &l_mg->gc_low_list;
                }
        } else if (vsc == line->sec_in_line) {
                if (line->gc_group != PBLK_LINEGC_EMPTY) {
                        line->gc_group = PBLK_LINEGC_EMPTY;
                        move_list = &l_mg->gc_empty_list;
                }
        } else {
                line->state = PBLK_LINESTATE_CORRUPT;
                line->gc_group = PBLK_LINEGC_NONE;
                move_list = &l_mg->corrupt_list;
                pr_err("pblk: corrupted vsc for line %d, vsc:%d (%d/%d/%d)\n",
                                                line->id, vsc,
                                                line->sec_in_line,
                                                lm->high_thrs, lm->mid_thrs);
        }

        return move_list;
}
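
/*
 * Worked example (hypothetical numbers): with high_thrs = 1024,
 * mid_thrs = 2048 and sec_in_line = 4096, a line with vsc = 0 goes to
 * gc_full_list, vsc = 512 to gc_high_list, vsc = 1500 to gc_mid_list,
 * vsc = 3000 to gc_low_list and vsc = 4096 to gc_empty_list. A vsc above
 * sec_in_line can only mean a corrupted valid sector count.
 */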

void pblk_discard(struct pblk *pblk, struct bio *bio)
{
        sector_t slba = pblk_get_lba(bio);
        sector_t nr_secs = pblk_get_secs(bio);

        pblk_invalidate_range(pblk, slba, nr_secs);
}

void pblk_log_write_err(struct pblk *pblk, struct nvm_rq *rqd)
{
        atomic_long_inc(&pblk->write_failed);
#ifdef CONFIG_NVM_DEBUG
        pblk_print_failed_rqd(pblk, rqd, rqd->error);
#endif
}

void pblk_log_read_err(struct pblk *pblk, struct nvm_rq *rqd)
{
        /* Empty page read is not necessarily an error (e.g., L2P recovery) */
        if (rqd->error == NVM_RSP_ERR_EMPTYPAGE) {
                atomic_long_inc(&pblk->read_empty);
                return;
        }

        switch (rqd->error) {
        case NVM_RSP_WARN_HIGHECC:
                atomic_long_inc(&pblk->read_high_ecc);
                break;
        case NVM_RSP_ERR_FAILECC:
        case NVM_RSP_ERR_FAILCRC:
                atomic_long_inc(&pblk->read_failed);
                break;
        default:
                pr_err("pblk: unknown read error:%d\n", rqd->error);
        }
#ifdef CONFIG_NVM_DEBUG
        pblk_print_failed_rqd(pblk, rqd, rqd->error);
#endif
}

void pblk_set_sec_per_write(struct pblk *pblk, int sec_per_write)
{
        pblk->sec_per_write = sec_per_write;
}

int pblk_submit_io(struct pblk *pblk, struct nvm_rq *rqd)
{
        struct nvm_tgt_dev *dev = pblk->dev;

#ifdef CONFIG_NVM_DEBUG
        int ret;

        ret = pblk_check_io(pblk, rqd);
        if (ret)
                return ret;
#endif

        atomic_inc(&pblk->inflight_io);

        return nvm_submit_io(dev, rqd);
}

int pblk_submit_io_sync(struct pblk *pblk, struct nvm_rq *rqd)
{
        struct nvm_tgt_dev *dev = pblk->dev;

#ifdef CONFIG_NVM_DEBUG
        int ret;

        ret = pblk_check_io(pblk, rqd);
        if (ret)
                return ret;
#endif

        atomic_inc(&pblk->inflight_io);

        return nvm_submit_io_sync(dev, rqd);
}

static void pblk_bio_map_addr_endio(struct bio *bio)
{
        bio_put(bio);
}

struct bio *pblk_bio_map_addr(struct pblk *pblk, void *data,
                              unsigned int nr_secs, unsigned int len,
                              int alloc_type, gfp_t gfp_mask)
{
        struct nvm_tgt_dev *dev = pblk->dev;
        void *kaddr = data;
        struct page *page;
        struct bio *bio;
        int i, ret;

        if (alloc_type == PBLK_KMALLOC_META)
                return bio_map_kern(dev->q, kaddr, len, gfp_mask);

        bio = bio_kmalloc(gfp_mask, nr_secs);
        if (!bio)
                return ERR_PTR(-ENOMEM);

        for (i = 0; i < nr_secs; i++) {
                page = vmalloc_to_page(kaddr);
                if (!page) {
                        pr_err("pblk: could not map vmalloc bio\n");
                        bio_put(bio);
                        bio = ERR_PTR(-ENOMEM);
                        goto out;
                }

                ret = bio_add_pc_page(dev->q, bio, page, PAGE_SIZE, 0);
                if (ret != PAGE_SIZE) {
                        pr_err("pblk: could not add page to bio\n");
                        bio_put(bio);
                        bio = ERR_PTR(-ENOMEM);
                        goto out;
                }

                kaddr += PAGE_SIZE;
        }

        bio->bi_end_io = pblk_bio_map_addr_endio;
out:
        return bio;
}

int pblk_calc_secs(struct pblk *pblk, unsigned long secs_avail,
                   unsigned long secs_to_flush)
{
        int max = pblk->sec_per_write;
        int min = pblk->min_write_pgs;
        int secs_to_sync = 0;

        if (secs_avail >= max)
                secs_to_sync = max;
        else if (secs_avail >= min)
                secs_to_sync = min * (secs_avail / min);
        else if (secs_to_flush)
                secs_to_sync = min;

        return secs_to_sync;
}
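
/*
 * Worked example (hypothetical geometry): with sec_per_write = 64 and
 * min_write_pgs = 8, secs_avail = 100 syncs 64 sectors, secs_avail = 35
 * syncs 32 (rounded down to a multiple of min) and secs_avail = 3 syncs
 * nothing unless a flush is pending, in which case the minimum of 8 is
 * returned and the write path pads the request.
 */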

void pblk_dealloc_page(struct pblk *pblk, struct pblk_line *line, int nr_secs)
{
        u64 addr;
        int i;

        spin_lock(&line->lock);
        addr = find_next_zero_bit(line->map_bitmap,
                                        pblk->lm.sec_per_line, line->cur_sec);
        line->cur_sec = addr - nr_secs;

        for (i = 0; i < nr_secs; i++, line->cur_sec--)
                WARN_ON(!test_and_clear_bit(line->cur_sec, line->map_bitmap));
        spin_unlock(&line->lock);
}

u64 __pblk_alloc_page(struct pblk *pblk, struct pblk_line *line, int nr_secs)
{
        u64 addr;
        int i;

        lockdep_assert_held(&line->lock);

        /* logic error: ppa out-of-bounds. Prevent generating bad address */
        if (line->cur_sec + nr_secs > pblk->lm.sec_per_line) {
                WARN(1, "pblk: page allocation out of bounds\n");
                nr_secs = pblk->lm.sec_per_line - line->cur_sec;
        }

        line->cur_sec = addr = find_next_zero_bit(line->map_bitmap,
                                        pblk->lm.sec_per_line, line->cur_sec);
        for (i = 0; i < nr_secs; i++, line->cur_sec++)
                WARN_ON(test_and_set_bit(line->cur_sec, line->map_bitmap));

        return addr;
}

u64 pblk_alloc_page(struct pblk *pblk, struct pblk_line *line, int nr_secs)
{
        u64 addr;

        /* Lock needed in case a write fails and a recovery needs to remap
         * failed write buffer entries
         */
        spin_lock(&line->lock);
        addr = __pblk_alloc_page(pblk, line, nr_secs);
        line->left_msecs -= nr_secs;
        WARN(line->left_msecs < 0, "pblk: page allocation out of bounds\n");
        spin_unlock(&line->lock);

        return addr;
}
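
/*
 * Example (illustrative): with cur_sec = 128 and the next eight map-bitmap
 * bits clear, pblk_alloc_page(pblk, line, 8) returns addr = 128, sets bits
 * 128-135, advances cur_sec to 136 and decreases left_msecs by 8.
 */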

u64 pblk_lookup_page(struct pblk *pblk, struct pblk_line *line)
{
        u64 paddr;

        spin_lock(&line->lock);
        paddr = find_next_zero_bit(line->map_bitmap,
                                        pblk->lm.sec_per_line, line->cur_sec);
        spin_unlock(&line->lock);

        return paddr;
}

/*
 * Submit emeta to one LUN in the raid line at a time to avoid a deadlock when
 * taking the per-LUN semaphore.
 */
static int pblk_line_submit_emeta_io(struct pblk *pblk, struct pblk_line *line,
                                     void *emeta_buf, u64 paddr, int dir)
{
        struct nvm_tgt_dev *dev = pblk->dev;
        struct nvm_geo *geo = &dev->geo;
        struct pblk_line_mgmt *l_mg = &pblk->l_mg;
        struct pblk_line_meta *lm = &pblk->lm;
        void *ppa_list, *meta_list;
        struct bio *bio;
        struct nvm_rq rqd;
        dma_addr_t dma_ppa_list, dma_meta_list;
        int min = pblk->min_write_pgs;
        int left_ppas = lm->emeta_sec[0];
        int id = line->id;
        int rq_ppas, rq_len;
        int cmd_op, bio_op;
        int i, j;
        int ret;

        if (dir == PBLK_WRITE) {
                bio_op = REQ_OP_WRITE;
                cmd_op = NVM_OP_PWRITE;
        } else if (dir == PBLK_READ) {
                bio_op = REQ_OP_READ;
                cmd_op = NVM_OP_PREAD;
        } else
                return -EINVAL;

        meta_list = nvm_dev_dma_alloc(dev->parent, GFP_KERNEL,
                                                        &dma_meta_list);
        if (!meta_list)
                return -ENOMEM;

        ppa_list = meta_list + pblk_dma_meta_size;
        dma_ppa_list = dma_meta_list + pblk_dma_meta_size;

next_rq:
        memset(&rqd, 0, sizeof(struct nvm_rq));

        rq_ppas = pblk_calc_secs(pblk, left_ppas, 0);
        rq_len = rq_ppas * geo->sec_size;

        bio = pblk_bio_map_addr(pblk, emeta_buf, rq_ppas, rq_len,
                                        l_mg->emeta_alloc_type, GFP_KERNEL);
        if (IS_ERR(bio)) {
                ret = PTR_ERR(bio);
                goto free_rqd_dma;
        }

        bio->bi_iter.bi_sector = 0; /* internal bio */
        bio_set_op_attrs(bio, bio_op, 0);

        rqd.bio = bio;
        rqd.meta_list = meta_list;
        rqd.ppa_list = ppa_list;
        rqd.dma_meta_list = dma_meta_list;
        rqd.dma_ppa_list = dma_ppa_list;
        rqd.opcode = cmd_op;
        rqd.nr_ppas = rq_ppas;

        if (dir == PBLK_WRITE) {
                struct pblk_sec_meta *meta_list = rqd.meta_list;

                rqd.flags = pblk_set_progr_mode(pblk, PBLK_WRITE);
                for (i = 0; i < rqd.nr_ppas; ) {
                        spin_lock(&line->lock);
                        paddr = __pblk_alloc_page(pblk, line, min);
                        spin_unlock(&line->lock);
                        for (j = 0; j < min; j++, i++, paddr++) {
                                meta_list[i].lba = cpu_to_le64(ADDR_EMPTY);
                                rqd.ppa_list[i] =
                                        addr_to_gen_ppa(pblk, paddr, id);
                        }
                }
        } else {
                for (i = 0; i < rqd.nr_ppas; ) {
                        struct ppa_addr ppa = addr_to_gen_ppa(pblk, paddr, id);
                        int pos = pblk_dev_ppa_to_pos(geo, ppa);
                        int read_type = PBLK_READ_RANDOM;

                        if (pblk_io_aligned(pblk, rq_ppas))
                                read_type = PBLK_READ_SEQUENTIAL;
                        rqd.flags = pblk_set_read_mode(pblk, read_type);

                        while (test_bit(pos, line->blk_bitmap)) {
                                paddr += min;
                                if (pblk_boundary_paddr_checks(pblk, paddr)) {
                                        pr_err("pblk: corrupt emeta line:%d\n",
                                                                line->id);
                                        bio_put(bio);
                                        ret = -EINTR;
                                        goto free_rqd_dma;
                                }

                                ppa = addr_to_gen_ppa(pblk, paddr, id);
                                pos = pblk_dev_ppa_to_pos(geo, ppa);
                        }

                        if (pblk_boundary_paddr_checks(pblk, paddr + min)) {
                                pr_err("pblk: corrupt emeta line:%d\n",
                                                                line->id);
                                bio_put(bio);
                                ret = -EINTR;
                                goto free_rqd_dma;
                        }

                        for (j = 0; j < min; j++, i++, paddr++)
                                rqd.ppa_list[i] =
                                        addr_to_gen_ppa(pblk, paddr, line->id);
                }
        }

        ret = pblk_submit_io_sync(pblk, &rqd);
        if (ret) {
                pr_err("pblk: emeta I/O submission failed: %d\n", ret);
                bio_put(bio);
                goto free_rqd_dma;
        }

        atomic_dec(&pblk->inflight_io);

        if (rqd.error) {
                if (dir == PBLK_WRITE)
                        pblk_log_write_err(pblk, &rqd);
                else
                        pblk_log_read_err(pblk, &rqd);
        }

        emeta_buf += rq_len;
        left_ppas -= rq_ppas;
        if (left_ppas)
                goto next_rq;
free_rqd_dma:
        nvm_dev_dma_free(dev->parent, rqd.meta_list, rqd.dma_meta_list);
        return ret;
}

u64 pblk_line_smeta_start(struct pblk *pblk, struct pblk_line *line)
{
        struct nvm_tgt_dev *dev = pblk->dev;
        struct nvm_geo *geo = &dev->geo;
        struct pblk_line_meta *lm = &pblk->lm;
        int bit;

        /* This usually only happens on bad lines */
        bit = find_first_zero_bit(line->blk_bitmap, lm->blk_per_line);
        if (bit >= lm->blk_per_line)
                return -1;

        return bit * geo->sec_per_pl;
}
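
/*
 * Example (hypothetical geometry): with sec_per_pl = 16 and the line's
 * first two blocks marked bad, the first zero bit in blk_bitmap is 2, so
 * smeta starts at sector 2 * 16 = 32 within the line.
 */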

static int pblk_line_submit_smeta_io(struct pblk *pblk, struct pblk_line *line,
                                     u64 paddr, int dir)
{
        struct nvm_tgt_dev *dev = pblk->dev;
        struct pblk_line_meta *lm = &pblk->lm;
        struct bio *bio;
        struct nvm_rq rqd;
        __le64 *lba_list = NULL;
        int i, ret;
        int cmd_op, bio_op;
        int flags;

        if (dir == PBLK_WRITE) {
                bio_op = REQ_OP_WRITE;
                cmd_op = NVM_OP_PWRITE;
                flags = pblk_set_progr_mode(pblk, PBLK_WRITE);
                lba_list = emeta_to_lbas(pblk, line->emeta->buf);
        } else if (dir == PBLK_READ) {
                bio_op = REQ_OP_READ;
                cmd_op = NVM_OP_PREAD;
                flags = pblk_set_read_mode(pblk, PBLK_READ_SEQUENTIAL);
        } else
                return -EINVAL;

        memset(&rqd, 0, sizeof(struct nvm_rq));

        rqd.meta_list = nvm_dev_dma_alloc(dev->parent, GFP_KERNEL,
                                                        &rqd.dma_meta_list);
        if (!rqd.meta_list)
                return -ENOMEM;

        rqd.ppa_list = rqd.meta_list + pblk_dma_meta_size;
        rqd.dma_ppa_list = rqd.dma_meta_list + pblk_dma_meta_size;

        bio = bio_map_kern(dev->q, line->smeta, lm->smeta_len, GFP_KERNEL);
        if (IS_ERR(bio)) {
                ret = PTR_ERR(bio);
                goto free_ppa_list;
        }

        bio->bi_iter.bi_sector = 0; /* internal bio */
        bio_set_op_attrs(bio, bio_op, 0);

        rqd.bio = bio;
        rqd.opcode = cmd_op;
        rqd.flags = flags;
        rqd.nr_ppas = lm->smeta_sec;

        for (i = 0; i < lm->smeta_sec; i++, paddr++) {
                struct pblk_sec_meta *meta_list = rqd.meta_list;

                rqd.ppa_list[i] = addr_to_gen_ppa(pblk, paddr, line->id);

                if (dir == PBLK_WRITE) {
                        __le64 addr_empty = cpu_to_le64(ADDR_EMPTY);

                        meta_list[i].lba = lba_list[paddr] = addr_empty;
                }
        }

        /*
         * This I/O is sent by the write thread when a line is replaced. Since
         * the write thread is the only one sending write and erase commands,
         * there is no need to take the LUN semaphore.
         */
        ret = pblk_submit_io_sync(pblk, &rqd);
        if (ret) {
                pr_err("pblk: smeta I/O submission failed: %d\n", ret);
                bio_put(bio);
                goto free_ppa_list;
        }

        atomic_dec(&pblk->inflight_io);

        if (rqd.error) {
                if (dir == PBLK_WRITE)
                        pblk_log_write_err(pblk, &rqd);
                else
                        pblk_log_read_err(pblk, &rqd);
        }

free_ppa_list:
        nvm_dev_dma_free(dev->parent, rqd.meta_list, rqd.dma_meta_list);

        return ret;
}

int pblk_line_read_smeta(struct pblk *pblk, struct pblk_line *line)
{
        u64 bpaddr = pblk_line_smeta_start(pblk, line);

        return pblk_line_submit_smeta_io(pblk, line, bpaddr, PBLK_READ);
}

int pblk_line_read_emeta(struct pblk *pblk, struct pblk_line *line,
                         void *emeta_buf)
{
        return pblk_line_submit_emeta_io(pblk, line, emeta_buf,
                                                line->emeta_ssec, PBLK_READ);
}

static void pblk_setup_e_rq(struct pblk *pblk, struct nvm_rq *rqd,
                            struct ppa_addr ppa)
{
        rqd->opcode = NVM_OP_ERASE;
        rqd->ppa_addr = ppa;
        rqd->nr_ppas = 1;
        rqd->flags = pblk_set_progr_mode(pblk, PBLK_ERASE);
        rqd->bio = NULL;
}

static int pblk_blk_erase_sync(struct pblk *pblk, struct ppa_addr ppa)
{
        struct nvm_rq rqd;
        int ret = 0;

        memset(&rqd, 0, sizeof(struct nvm_rq));

        pblk_setup_e_rq(pblk, &rqd, ppa);

        /* The write thread schedules erases so that it minimizes disturbances
         * with writes. Thus, there is no need to take the LUN semaphore.
         */
        ret = pblk_submit_io_sync(pblk, &rqd);
        if (ret) {
                struct nvm_tgt_dev *dev = pblk->dev;
                struct nvm_geo *geo = &dev->geo;

                pr_err("pblk: could not sync erase line:%d,blk:%d\n",
                                        pblk_dev_ppa_to_line(ppa),
                                        pblk_dev_ppa_to_pos(geo, ppa));

                rqd.error = ret;
        }

        rqd.private = pblk;
        __pblk_end_io_erase(pblk, &rqd);

        return ret;
}

int pblk_line_erase(struct pblk *pblk, struct pblk_line *line)
{
        struct pblk_line_meta *lm = &pblk->lm;
        struct ppa_addr ppa;
        int ret, bit = -1;

        /* Erase only good blocks, one at a time */
        do {
                spin_lock(&line->lock);
                bit = find_next_zero_bit(line->erase_bitmap, lm->blk_per_line,
                                                                bit + 1);
                if (bit >= lm->blk_per_line) {
                        spin_unlock(&line->lock);
                        break;
                }

                ppa = pblk->luns[bit].bppa; /* set ch and lun */
                ppa.g.blk = line->id;

                atomic_dec(&line->left_eblks);
                WARN_ON(test_and_set_bit(bit, line->erase_bitmap));
                spin_unlock(&line->lock);

                ret = pblk_blk_erase_sync(pblk, ppa);
                if (ret) {
                        pr_err("pblk: failed to erase line %d\n", line->id);
                        return ret;
                }
        } while (1);

        return 0;
}

static void pblk_line_setup_metadata(struct pblk_line *line,
                                     struct pblk_line_mgmt *l_mg,
                                     struct pblk_line_meta *lm)
{
        int meta_line;

        lockdep_assert_held(&l_mg->free_lock);

retry_meta:
        meta_line = find_first_zero_bit(&l_mg->meta_bitmap, PBLK_DATA_LINES);
        if (meta_line == PBLK_DATA_LINES) {
                spin_unlock(&l_mg->free_lock);
                io_schedule();
                spin_lock(&l_mg->free_lock);
                goto retry_meta;
        }

        set_bit(meta_line, &l_mg->meta_bitmap);
        line->meta_line = meta_line;

        line->smeta = l_mg->sline_meta[meta_line];
        line->emeta = l_mg->eline_meta[meta_line];

        memset(line->smeta, 0, lm->smeta_len);
        memset(line->emeta->buf, 0, lm->emeta_len[0]);

        line->emeta->mem = 0;
        atomic_set(&line->emeta->sync, 0);
}

/* For now, lines are always assumed to be full lines. Thus, the smeta
 * former and current LUN bitmaps are omitted.
 */
static int pblk_line_init_metadata(struct pblk *pblk, struct pblk_line *line,
                                  struct pblk_line *cur)
{
        struct nvm_tgt_dev *dev = pblk->dev;
        struct nvm_geo *geo = &dev->geo;
        struct pblk_line_meta *lm = &pblk->lm;
        struct pblk_line_mgmt *l_mg = &pblk->l_mg;
        struct pblk_emeta *emeta = line->emeta;
        struct line_emeta *emeta_buf = emeta->buf;
        struct line_smeta *smeta_buf = (struct line_smeta *)line->smeta;
        int nr_blk_line;

        /* After erasing the line, new bad blocks might appear and we risk
         * having an invalid line
         */
        nr_blk_line = lm->blk_per_line -
                        bitmap_weight(line->blk_bitmap, lm->blk_per_line);
        if (nr_blk_line < lm->min_blk_line) {
                spin_lock(&l_mg->free_lock);
                spin_lock(&line->lock);
                line->state = PBLK_LINESTATE_BAD;
                spin_unlock(&line->lock);

                list_add_tail(&line->list, &l_mg->bad_list);
                spin_unlock(&l_mg->free_lock);

                pr_debug("pblk: line %d is bad\n", line->id);

                return 0;
        }

        /* Run-time metadata */
        line->lun_bitmap = ((void *)(smeta_buf)) + sizeof(struct line_smeta);

        /* Mark LUNs allocated in this line (all for now) */
        bitmap_set(line->lun_bitmap, 0, lm->lun_bitmap_len);

        smeta_buf->header.identifier = cpu_to_le32(PBLK_MAGIC);
        memcpy(smeta_buf->header.uuid, pblk->instance_uuid, 16);
        smeta_buf->header.id = cpu_to_le32(line->id);
        smeta_buf->header.type = cpu_to_le16(line->type);
        smeta_buf->header.version = SMETA_VERSION;

        /* Start metadata */
        smeta_buf->seq_nr = cpu_to_le64(line->seq_nr);
        smeta_buf->window_wr_lun = cpu_to_le32(geo->nr_luns);

        /* Fill metadata among lines */
        if (cur) {
                memcpy(line->lun_bitmap, cur->lun_bitmap, lm->lun_bitmap_len);
                smeta_buf->prev_id = cpu_to_le32(cur->id);
                cur->emeta->buf->next_id = cpu_to_le32(line->id);
        } else {
                smeta_buf->prev_id = cpu_to_le32(PBLK_LINE_EMPTY);
        }

        /* All smeta must be set at this point */
        smeta_buf->header.crc = cpu_to_le32(
                        pblk_calc_meta_header_crc(pblk, &smeta_buf->header));
        smeta_buf->crc = cpu_to_le32(pblk_calc_smeta_crc(pblk, smeta_buf));

        /* End metadata */
        memcpy(&emeta_buf->header, &smeta_buf->header,
                                                sizeof(struct line_header));
        emeta_buf->seq_nr = cpu_to_le64(line->seq_nr);
        emeta_buf->nr_lbas = cpu_to_le64(line->sec_in_line);
        emeta_buf->nr_valid_lbas = cpu_to_le64(0);
        emeta_buf->next_id = cpu_to_le32(PBLK_LINE_EMPTY);
        emeta_buf->crc = cpu_to_le32(0);
        emeta_buf->prev_id = smeta_buf->prev_id;

        return 1;
}

/* Mark bad-block and metadata sectors on the line bitmaps and compute the
 * number of sectors the line can actually hold.
 */
static int pblk_line_init_bb(struct pblk *pblk, struct pblk_line *line,
                             int init)
{
        struct nvm_tgt_dev *dev = pblk->dev;
        struct nvm_geo *geo = &dev->geo;
        struct pblk_line_meta *lm = &pblk->lm;
        struct pblk_line_mgmt *l_mg = &pblk->l_mg;
        int nr_bb = 0;
        u64 off;
        int bit = -1;

        line->sec_in_line = lm->sec_per_line;

        /* Capture bad block information on line mapping bitmaps */
        while ((bit = find_next_bit(line->blk_bitmap, lm->blk_per_line,
                                        bit + 1)) < lm->blk_per_line) {
                off = bit * geo->sec_per_pl;
                bitmap_shift_left(l_mg->bb_aux, l_mg->bb_template, off,
                                                        lm->sec_per_line);
                bitmap_or(line->map_bitmap, line->map_bitmap, l_mg->bb_aux,
                                                        lm->sec_per_line);
                line->sec_in_line -= geo->sec_per_blk;
                if (bit >= lm->emeta_bb)
                        nr_bb++;
        }

        /* Mark smeta metadata sectors as bad sectors */
        bit = find_first_zero_bit(line->blk_bitmap, lm->blk_per_line);
        off = bit * geo->sec_per_pl;
        bitmap_set(line->map_bitmap, off, lm->smeta_sec);
        line->sec_in_line -= lm->smeta_sec;
        line->smeta_ssec = off;
        line->cur_sec = off + lm->smeta_sec;

        if (init && pblk_line_submit_smeta_io(pblk, line, off, PBLK_WRITE)) {
                pr_debug("pblk: line smeta I/O failed. Retry\n");
                return 1;
        }

        bitmap_copy(line->invalid_bitmap, line->map_bitmap, lm->sec_per_line);

        /* Mark emeta metadata sectors as bad sectors. We need to consider bad
         * blocks to make sure that there are enough sectors to store emeta
         */
        off = lm->sec_per_line - lm->emeta_sec[0];
        bitmap_set(line->invalid_bitmap, off, lm->emeta_sec[0]);
        while (nr_bb) {
                off -= geo->sec_per_pl;
                if (!test_bit(off, line->invalid_bitmap)) {
                        bitmap_set(line->invalid_bitmap, off, geo->sec_per_pl);
                        nr_bb--;
                }
        }

        line->sec_in_line -= lm->emeta_sec[0];
        line->emeta_ssec = off;
        line->nr_valid_lbas = 0;
        line->left_msecs = line->sec_in_line;
        *line->vsc = cpu_to_le32(line->sec_in_line);

        if (lm->sec_per_line - line->sec_in_line !=
                bitmap_weight(line->invalid_bitmap, lm->sec_per_line)) {
                spin_lock(&line->lock);
                line->state = PBLK_LINESTATE_BAD;
                spin_unlock(&line->lock);

                list_add_tail(&line->list, &l_mg->bad_list);
                pr_err("pblk: unexpected line %d is bad\n", line->id);

                return 0;
        }

        return 1;
}
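
/*
 * Sector accounting sketch (hypothetical numbers): for a line with
 * sec_per_line = 4096, one bad block of 64 sectors, smeta_sec = 8 and
 * emeta_sec[0] = 64, sec_in_line ends up as 4096 - 64 - 8 - 64 = 3960.
 * The sanity check above requires that the 136 reserved sectors match
 * the weight of invalid_bitmap, otherwise the line is marked bad.
 */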

static int pblk_line_prepare(struct pblk *pblk, struct pblk_line *line)
{
        struct pblk_line_meta *lm = &pblk->lm;
        int blk_in_line = atomic_read(&line->blk_in_line);

        line->map_bitmap = kzalloc(lm->sec_bitmap_len, GFP_ATOMIC);
        if (!line->map_bitmap)
                return -ENOMEM;

        /* will be initialized using bb info from map_bitmap */
        line->invalid_bitmap = kmalloc(lm->sec_bitmap_len, GFP_ATOMIC);
        if (!line->invalid_bitmap) {
                kfree(line->map_bitmap);
                return -ENOMEM;
        }

        spin_lock(&line->lock);
        if (line->state != PBLK_LINESTATE_FREE) {
                kfree(line->map_bitmap);
                kfree(line->invalid_bitmap);
                spin_unlock(&line->lock);
                WARN(1, "pblk: corrupted line %d, state %d\n",
                                                        line->id, line->state);
                return -EAGAIN;
        }

        line->state = PBLK_LINESTATE_OPEN;

        atomic_set(&line->left_eblks, blk_in_line);
        atomic_set(&line->left_seblks, blk_in_line);

        line->meta_distance = lm->meta_distance;
        spin_unlock(&line->lock);

        /* Bad blocks do not need to be erased */
        bitmap_copy(line->erase_bitmap, line->blk_bitmap, lm->blk_per_line);

        kref_init(&line->ref);

        return 0;
}

int pblk_line_recov_alloc(struct pblk *pblk, struct pblk_line *line)
{
        struct pblk_line_mgmt *l_mg = &pblk->l_mg;
        int ret;

        spin_lock(&l_mg->free_lock);
        l_mg->data_line = line;
        list_del(&line->list);

        ret = pblk_line_prepare(pblk, line);
        if (ret) {
                list_add(&line->list, &l_mg->free_list);
                spin_unlock(&l_mg->free_lock);
                return ret;
        }
        spin_unlock(&l_mg->free_lock);

        pblk_rl_free_lines_dec(&pblk->rl, line);

        if (!pblk_line_init_bb(pblk, line, 0)) {
                list_add(&line->list, &l_mg->free_list);
                return -EINTR;
        }

        return 0;
}

void pblk_line_recov_close(struct pblk *pblk, struct pblk_line *line)
{
        kfree(line->map_bitmap);
        line->map_bitmap = NULL;
        line->smeta = NULL;
        line->emeta = NULL;
}

struct pblk_line *pblk_line_get(struct pblk *pblk)
{
        struct pblk_line_mgmt *l_mg = &pblk->l_mg;
        struct pblk_line_meta *lm = &pblk->lm;
        struct pblk_line *line;
        int ret, bit;

        lockdep_assert_held(&l_mg->free_lock);

retry:
        if (list_empty(&l_mg->free_list)) {
                pr_err("pblk: no free lines\n");
                return NULL;
        }

        line = list_first_entry(&l_mg->free_list, struct pblk_line, list);
        list_del(&line->list);
        l_mg->nr_free_lines--;

        bit = find_first_zero_bit(line->blk_bitmap, lm->blk_per_line);
        if (unlikely(bit >= lm->blk_per_line)) {
                spin_lock(&line->lock);
                line->state = PBLK_LINESTATE_BAD;
                spin_unlock(&line->lock);

                list_add_tail(&line->list, &l_mg->bad_list);

                pr_debug("pblk: line %d is bad\n", line->id);
                goto retry;
        }

        ret = pblk_line_prepare(pblk, line);
        if (ret) {
                if (ret == -EAGAIN) {
                        list_add(&line->list, &l_mg->corrupt_list);
                        goto retry;
                } else {
                        pr_err("pblk: failed to prepare line %d\n", line->id);
                        list_add(&line->list, &l_mg->free_list);
                        l_mg->nr_free_lines++;
                        return NULL;
                }
        }

        return line;
}

static struct pblk_line *pblk_line_retry(struct pblk *pblk,
                                         struct pblk_line *line)
{
        struct pblk_line_mgmt *l_mg = &pblk->l_mg;
        struct pblk_line *retry_line;

retry:
        spin_lock(&l_mg->free_lock);
        retry_line = pblk_line_get(pblk);
        if (!retry_line) {
                l_mg->data_line = NULL;
                spin_unlock(&l_mg->free_lock);
                return NULL;
        }

        retry_line->smeta = line->smeta;
        retry_line->emeta = line->emeta;
        retry_line->meta_line = line->meta_line;

        pblk_line_free(pblk, line);
        l_mg->data_line = retry_line;
        spin_unlock(&l_mg->free_lock);

        pblk_rl_free_lines_dec(&pblk->rl, retry_line);

        if (pblk_line_erase(pblk, retry_line))
                goto retry;

        return retry_line;
}

static void pblk_set_space_limit(struct pblk *pblk)
{
        struct pblk_rl *rl = &pblk->rl;

        atomic_set(&rl->rb_space, 0);
}

struct pblk_line *pblk_line_get_first_data(struct pblk *pblk)
{
        struct pblk_line_mgmt *l_mg = &pblk->l_mg;
        struct pblk_line *line;
        int is_next = 0;

        spin_lock(&l_mg->free_lock);
        line = pblk_line_get(pblk);
        if (!line) {
                spin_unlock(&l_mg->free_lock);
                return NULL;
        }

        line->seq_nr = l_mg->d_seq_nr++;
        line->type = PBLK_LINETYPE_DATA;
        l_mg->data_line = line;

        pblk_line_setup_metadata(line, l_mg, &pblk->lm);

        /* Allocate next line for preparation */
        l_mg->data_next = pblk_line_get(pblk);
        if (!l_mg->data_next) {
                /* If we cannot get a new line, we need to stop the pipeline.
                 * Only allow as many writes in as we can store safely and then
                 * fail gracefully
                 */
                pblk_set_space_limit(pblk);

                l_mg->data_next = NULL;
        } else {
                l_mg->data_next->seq_nr = l_mg->d_seq_nr++;
                l_mg->data_next->type = PBLK_LINETYPE_DATA;
                is_next = 1;
        }
        spin_unlock(&l_mg->free_lock);

        if (pblk_line_erase(pblk, line)) {
                line = pblk_line_retry(pblk, line);
                if (!line)
                        return NULL;
        }

        pblk_rl_free_lines_dec(&pblk->rl, line);
        if (is_next)
                pblk_rl_free_lines_dec(&pblk->rl, l_mg->data_next);

retry_setup:
        if (!pblk_line_init_metadata(pblk, line, NULL)) {
                line = pblk_line_retry(pblk, line);
                if (!line)
                        return NULL;

                goto retry_setup;
        }

        if (!pblk_line_init_bb(pblk, line, 1)) {
                line = pblk_line_retry(pblk, line);
                if (!line)
                        return NULL;

                goto retry_setup;
        }

        return line;
}

static void pblk_stop_writes(struct pblk *pblk, struct pblk_line *line)
{
        lockdep_assert_held(&pblk->l_mg.free_lock);

        pblk_set_space_limit(pblk);
        pblk->state = PBLK_STATE_STOPPING;
}

static void pblk_line_close_meta_sync(struct pblk *pblk)
{
        struct pblk_line_mgmt *l_mg = &pblk->l_mg;
        struct pblk_line_meta *lm = &pblk->lm;
        struct pblk_line *line, *tline;
        LIST_HEAD(list);

        spin_lock(&l_mg->close_lock);
        if (list_empty(&l_mg->emeta_list)) {
                spin_unlock(&l_mg->close_lock);
                return;
        }

        list_cut_position(&list, &l_mg->emeta_list, l_mg->emeta_list.prev);
        spin_unlock(&l_mg->close_lock);

        list_for_each_entry_safe(line, tline, &list, list) {
                struct pblk_emeta *emeta = line->emeta;

                while (emeta->mem < lm->emeta_len[0]) {
                        int ret;

                        ret = pblk_submit_meta_io(pblk, line);
                        if (ret) {
                                pr_err("pblk: sync meta line %d failed (%d)\n",
                                                        line->id, ret);
                                return;
                        }
                }
        }

        pblk_wait_for_meta(pblk);
        flush_workqueue(pblk->close_wq);
}

void pblk_pipeline_stop(struct pblk *pblk)
{
        struct pblk_line_mgmt *l_mg = &pblk->l_mg;
        int ret;

        spin_lock(&l_mg->free_lock);
        if (pblk->state == PBLK_STATE_RECOVERING ||
                                        pblk->state == PBLK_STATE_STOPPED) {
                spin_unlock(&l_mg->free_lock);
                return;
        }
        pblk->state = PBLK_STATE_RECOVERING;
        spin_unlock(&l_mg->free_lock);

        pblk_flush_writer(pblk);
        pblk_wait_for_meta(pblk);

        ret = pblk_recov_pad(pblk);
        if (ret) {
                pr_err("pblk: could not close data on teardown (%d)\n", ret);
                return;
        }

        flush_workqueue(pblk->bb_wq);
        pblk_line_close_meta_sync(pblk);

        spin_lock(&l_mg->free_lock);
        pblk->state = PBLK_STATE_STOPPED;
        l_mg->data_line = NULL;
        l_mg->data_next = NULL;
        spin_unlock(&l_mg->free_lock);
}

struct pblk_line *pblk_line_replace_data(struct pblk *pblk)
{
        struct pblk_line_mgmt *l_mg = &pblk->l_mg;
        struct pblk_line *cur, *new = NULL;
        unsigned int left_seblks;
        int is_next = 0;

        cur = l_mg->data_line;
        new = l_mg->data_next;
        if (!new)
                goto out;
        l_mg->data_line = new;

        spin_lock(&l_mg->free_lock);
        if (pblk->state != PBLK_STATE_RUNNING) {
                l_mg->data_line = NULL;
                l_mg->data_next = NULL;
                spin_unlock(&l_mg->free_lock);
                goto out;
        }

        pblk_line_setup_metadata(new, l_mg, &pblk->lm);
        spin_unlock(&l_mg->free_lock);

retry_erase:
        left_seblks = atomic_read(&new->left_seblks);
        if (left_seblks) {
                /* If line is not fully erased, erase it */
                if (atomic_read(&new->left_eblks)) {
                        if (pblk_line_erase(pblk, new))
                                goto out;
                } else {
                        io_schedule();
                }
                goto retry_erase;
        }

retry_setup:
        if (!pblk_line_init_metadata(pblk, new, cur)) {
                new = pblk_line_retry(pblk, new);
                if (!new)
                        goto out;

                goto retry_setup;
        }

        if (!pblk_line_init_bb(pblk, new, 1)) {
                new = pblk_line_retry(pblk, new);
                if (!new)
                        goto out;

                goto retry_setup;
        }

        /* Allocate next line for preparation */
        spin_lock(&l_mg->free_lock);
        l_mg->data_next = pblk_line_get(pblk);
        if (!l_mg->data_next) {
                /* If we cannot get a new line, we need to stop the pipeline.
                 * Only allow as many writes in as we can store safely and then
                 * fail gracefully
                 */
                pblk_stop_writes(pblk, new);
                l_mg->data_next = NULL;
        } else {
                l_mg->data_next->seq_nr = l_mg->d_seq_nr++;
                l_mg->data_next->type = PBLK_LINETYPE_DATA;
                is_next = 1;
        }
        spin_unlock(&l_mg->free_lock);

        if (is_next)
                pblk_rl_free_lines_dec(&pblk->rl, l_mg->data_next);

out:
        return new;
}

void pblk_line_free(struct pblk *pblk, struct pblk_line *line)
{
        kfree(line->map_bitmap);
        kfree(line->invalid_bitmap);

        *line->vsc = cpu_to_le32(EMPTY_ENTRY);

        line->map_bitmap = NULL;
        line->invalid_bitmap = NULL;
        line->smeta = NULL;
        line->emeta = NULL;
}

static void __pblk_line_put(struct pblk *pblk, struct pblk_line *line)
{
        struct pblk_line_mgmt *l_mg = &pblk->l_mg;
        struct pblk_gc *gc = &pblk->gc;

        spin_lock(&line->lock);
        WARN_ON(line->state != PBLK_LINESTATE_GC);
        line->state = PBLK_LINESTATE_FREE;
        line->gc_group = PBLK_LINEGC_NONE;
        pblk_line_free(pblk, line);
        spin_unlock(&line->lock);

        atomic_dec(&gc->pipeline_gc);

        spin_lock(&l_mg->free_lock);
        list_add_tail(&line->list, &l_mg->free_list);
        l_mg->nr_free_lines++;
        spin_unlock(&l_mg->free_lock);

        pblk_rl_free_lines_inc(&pblk->rl, line);
}

static void pblk_line_put_ws(struct work_struct *work)
{
        struct pblk_line_ws *line_put_ws = container_of(work,
                                                struct pblk_line_ws, ws);
        struct pblk *pblk = line_put_ws->pblk;
        struct pblk_line *line = line_put_ws->line;

        __pblk_line_put(pblk, line);
        mempool_free(line_put_ws, pblk->gen_ws_pool);
}

void pblk_line_put(struct kref *ref)
{
        struct pblk_line *line = container_of(ref, struct pblk_line, ref);
        struct pblk *pblk = line->pblk;

        __pblk_line_put(pblk, line);
}

void pblk_line_put_wq(struct kref *ref)
{
        struct pblk_line *line = container_of(ref, struct pblk_line, ref);
        struct pblk *pblk = line->pblk;
        struct pblk_line_ws *line_put_ws;

        line_put_ws = mempool_alloc(pblk->gen_ws_pool, GFP_ATOMIC);
        if (!line_put_ws)
                return;

        line_put_ws->pblk = pblk;
        line_put_ws->line = line;
        line_put_ws->priv = NULL;

        INIT_WORK(&line_put_ws->ws, pblk_line_put_ws);
        queue_work(pblk->r_end_wq, &line_put_ws->ws);
}
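
/*
 * Usage sketch (hypothetical caller): references taken on a line while
 * I/O is in flight are dropped through one of the two kref release
 * functions; the _wq variant defers the actual free to a workqueue, e.g.
 * when releasing from a read completion path:
 *
 *	kref_get(&line->ref);
 *	...
 *	kref_put(&line->ref, pblk_line_put);	(process context)
 *
 * or, from a completion path:
 *
 *	kref_put(&line->ref, pblk_line_put_wq);
 */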
1542
int pblk_blk_erase_async(struct pblk *pblk, struct ppa_addr ppa)
{
	struct nvm_rq *rqd;
	int err;

	rqd = pblk_alloc_rqd(pblk, PBLK_ERASE);

	pblk_setup_e_rq(pblk, rqd, ppa);

	rqd->end_io = pblk_end_io_erase;
	rqd->private = pblk;

	/* The write thread schedules erases so that they do not disturb
	 * concurrent user writes. Thus, there is no need to take the LUN
	 * semaphore.
	 */
	err = pblk_submit_io(pblk, rqd);
	if (err) {
		struct nvm_tgt_dev *dev = pblk->dev;
		struct nvm_geo *geo = &dev->geo;

		pr_err("pblk: could not async erase line:%d,blk:%d\n",
					pblk_dev_ppa_to_line(ppa),
					pblk_dev_ppa_to_pos(geo, ppa));
	}

	return err;
}

struct pblk_line *pblk_line_get_data(struct pblk *pblk)
{
	return pblk->l_mg.data_line;
}

/* For now, always erase next line */
struct pblk_line *pblk_line_get_erase(struct pblk *pblk)
{
	return pblk->l_mg.data_next;
}

int pblk_line_is_full(struct pblk_line *line)
{
	return (line->left_msecs == 0);
}

static void pblk_line_should_sync_meta(struct pblk *pblk)
{
	if (pblk_rl_is_limit(&pblk->rl))
		pblk_line_close_meta_sync(pblk);
}

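/*
 * Transition a fully written line from OPEN to CLOSED: release its meta
 * line slot, queue the line on the GC group list chosen by
 * pblk_line_gc_list(), and free the write-side map bitmap, which is no
 * longer needed once the line is closed.
 */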
void pblk_line_close(struct pblk *pblk, struct pblk_line *line)
{
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	struct list_head *move_list;

#ifdef CONFIG_NVM_DEBUG
	struct pblk_line_meta *lm = &pblk->lm;

	WARN(!bitmap_full(line->map_bitmap, lm->sec_per_line),
				"pblk: corrupt closed line %d\n", line->id);
#endif

	spin_lock(&l_mg->free_lock);
	WARN_ON(!test_and_clear_bit(line->meta_line, &l_mg->meta_bitmap));
	spin_unlock(&l_mg->free_lock);

	spin_lock(&l_mg->gc_lock);
	spin_lock(&line->lock);
	WARN_ON(line->state != PBLK_LINESTATE_OPEN);
	line->state = PBLK_LINESTATE_CLOSED;
	move_list = pblk_line_gc_list(pblk, line);

	list_add_tail(&line->list, move_list);

	kfree(line->map_bitmap);
	line->map_bitmap = NULL;
	line->smeta = NULL;
	line->emeta = NULL;

	spin_unlock(&line->lock);
	spin_unlock(&l_mg->gc_lock);
}

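/*
 * Finish the line's end metadata (emeta): snapshot the valid sector
 * counts and the bad block bitmap, seal the buffer with a CRC and queue
 * the line for emeta write-out.
 */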
void pblk_line_close_meta(struct pblk *pblk, struct pblk_line *line)
{
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	struct pblk_line_meta *lm = &pblk->lm;
	struct pblk_emeta *emeta = line->emeta;
	struct line_emeta *emeta_buf = emeta->buf;

	/* The vsc values need not be exact; avoid taking a big line lock and
	 * accept an approximate snapshot instead.
	 */
	memcpy(emeta_to_vsc(pblk, emeta_buf), l_mg->vsc_list, lm->vsc_list_len);
	memcpy(emeta_to_bb(emeta_buf), line->blk_bitmap, lm->blk_bitmap_len);

	emeta_buf->nr_valid_lbas = cpu_to_le64(line->nr_valid_lbas);
	emeta_buf->crc = cpu_to_le32(pblk_calc_emeta_crc(pblk, emeta_buf));

	spin_lock(&l_mg->close_lock);
	spin_lock(&line->lock);
	list_add_tail(&line->list, &l_mg->emeta_list);
	spin_unlock(&line->lock);
	spin_unlock(&l_mg->close_lock);

	pblk_line_should_sync_meta(pblk);
}

void pblk_line_close_ws(struct work_struct *work)
{
	struct pblk_line_ws *line_ws = container_of(work, struct pblk_line_ws,
									ws);
	struct pblk *pblk = line_ws->pblk;
	struct pblk_line *line = line_ws->line;

	pblk_line_close(pblk, line);
	mempool_free(line_ws, pblk->gen_ws_pool);
}

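/*
 * Generic helper to run a work function on the given workqueue: pack
 * (pblk, line, priv) into a pblk_line_ws taken from the shared mempool
 * and queue it. The work function is responsible for returning the
 * pblk_line_ws to the pool.
 */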
void pblk_gen_run_ws(struct pblk *pblk, struct pblk_line *line, void *priv,
		      void (*work)(struct work_struct *), gfp_t gfp_mask,
		      struct workqueue_struct *wq)
{
	struct pblk_line_ws *line_ws;

	line_ws = mempool_alloc(pblk->gen_ws_pool, gfp_mask);
	/* Allocation can fail for non-sleeping masks (e.g. GFP_ATOMIC) */
	if (!line_ws)
		return;

	line_ws->pblk = pblk;
	line_ws->line = line;
	line_ws->priv = priv;

	INIT_WORK(&line_ws->ws, work);
	queue_work(wq, &line_ws->ws);
}

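/*
 * Per-LUN write serialization: take the LUN's write semaphore before
 * submitting an I/O to it. The 30 second timeout below guards against a
 * stuck device; a failure to take the semaphore is logged but not
 * propagated to the caller.
 */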
static void __pblk_down_page(struct pblk *pblk, struct ppa_addr *ppa_list,
			     int nr_ppas, int pos)
{
	struct pblk_lun *rlun = &pblk->luns[pos];
	int ret;

	/*
	 * Only send one inflight I/O per LUN. Since we map at page
	 * granularity, all ppas in the I/O will map to the same LUN
	 */
#ifdef CONFIG_NVM_DEBUG
	int i;

	for (i = 1; i < nr_ppas; i++)
		WARN_ON(ppa_list[0].g.lun != ppa_list[i].g.lun ||
				ppa_list[0].g.ch != ppa_list[i].g.ch);
#endif

	ret = down_timeout(&rlun->wr_sem, msecs_to_jiffies(30000));
	if (ret == -ETIME || ret == -EINTR)
		pr_err("pblk: taking lun semaphore timed out: err %d\n", -ret);
}

void pblk_down_page(struct pblk *pblk, struct ppa_addr *ppa_list, int nr_ppas)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	int pos = pblk_ppa_to_pos(geo, ppa_list[0]);

	__pblk_down_page(pblk, ppa_list, nr_ppas, pos);
}

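/*
 * Like pblk_down_page(), but track the LUNs taken by this request in
 * lun_bitmap so that each semaphore is downed at most once per request;
 * pblk_up_rq() later releases every LUN recorded in the bitmap.
 */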
void pblk_down_rq(struct pblk *pblk, struct ppa_addr *ppa_list, int nr_ppas,
		  unsigned long *lun_bitmap)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	int pos = pblk_ppa_to_pos(geo, ppa_list[0]);

	/* If the LUN has already been locked for this request, do not attempt
	 * to lock it again
	 */
	if (test_and_set_bit(pos, lun_bitmap))
		return;

	__pblk_down_page(pblk, ppa_list, nr_ppas, pos);
}

void pblk_up_page(struct pblk *pblk, struct ppa_addr *ppa_list, int nr_ppas)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	struct pblk_lun *rlun;
	int pos = pblk_ppa_to_pos(geo, ppa_list[0]);

#ifdef CONFIG_NVM_DEBUG
	int i;

	for (i = 1; i < nr_ppas; i++)
		WARN_ON(ppa_list[0].g.lun != ppa_list[i].g.lun ||
				ppa_list[0].g.ch != ppa_list[i].g.ch);
#endif

	rlun = &pblk->luns[pos];
	up(&rlun->wr_sem);
}

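/*
 * Release every LUN semaphore that pblk_down_rq() recorded in
 * lun_bitmap for this request.
 */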
void pblk_up_rq(struct pblk *pblk, struct ppa_addr *ppa_list, int nr_ppas,
		unsigned long *lun_bitmap)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	struct pblk_lun *rlun;
	int nr_luns = geo->nr_luns;
	int bit = -1;

	while ((bit = find_next_bit(lun_bitmap, nr_luns, bit + 1)) < nr_luns) {
		rlun = &pblk->luns[bit];
		up(&rlun->wr_sem);
	}
}

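/*
 * Update the L2P table for a single lba. If the lba was previously
 * mapped to a device address, that address is invalidated so its
 * sectors can later be garbage collected.
 */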
void pblk_update_map(struct pblk *pblk, sector_t lba, struct ppa_addr ppa)
{
	struct ppa_addr ppa_l2p;

	/* logic error: lba out-of-bounds. Ignore update */
	if (!(lba < pblk->rl.nr_secs)) {
		WARN(1, "pblk: corrupted L2P map request\n");
		return;
	}

	spin_lock(&pblk->trans_lock);
	ppa_l2p = pblk_trans_map_get(pblk, lba);

	if (!pblk_addr_in_cache(ppa_l2p) && !pblk_ppa_empty(ppa_l2p))
		pblk_map_invalidate(pblk, ppa_l2p);

	pblk_trans_map_set(pblk, lba, ppa);
	spin_unlock(&pblk->trans_lock);
}

void pblk_update_map_cache(struct pblk *pblk, sector_t lba, struct ppa_addr ppa)
{
#ifdef CONFIG_NVM_DEBUG
	/* Callers must ensure that the ppa points to a cache address */
	BUG_ON(!pblk_addr_in_cache(ppa));
	BUG_ON(pblk_rb_pos_oob(&pblk->rwb, pblk_addr_to_cacheline(ppa)));
#endif

	pblk_update_map(pblk, lba, ppa);
}

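/*
 * L2P update for garbage collected data. Returns 1 if the mapping was
 * moved to the new cache address, or 0 if the host overwrote the lba
 * while GC was moving it (detected when the L2P entry no longer points
 * at the sector being collected), in which case the GC copy is stale
 * and must be dropped by the caller.
 */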
int pblk_update_map_gc(struct pblk *pblk, sector_t lba, struct ppa_addr ppa_new,
		       struct pblk_line *gc_line, u64 paddr_gc)
{
	struct ppa_addr ppa_l2p, ppa_gc;
	int ret = 1;

#ifdef CONFIG_NVM_DEBUG
	/* Callers must ensure that the ppa points to a cache address */
	BUG_ON(!pblk_addr_in_cache(ppa_new));
	BUG_ON(pblk_rb_pos_oob(&pblk->rwb, pblk_addr_to_cacheline(ppa_new)));
#endif

	/* logic error: lba out-of-bounds. Ignore update */
	if (!(lba < pblk->rl.nr_secs)) {
		WARN(1, "pblk: corrupted L2P map request\n");
		return 0;
	}

	spin_lock(&pblk->trans_lock);
	ppa_l2p = pblk_trans_map_get(pblk, lba);
	ppa_gc = addr_to_gen_ppa(pblk, paddr_gc, gc_line->id);

	if (!pblk_ppa_comp(ppa_l2p, ppa_gc)) {
		spin_lock(&gc_line->lock);
		WARN(!test_bit(paddr_gc, gc_line->invalid_bitmap),
						"pblk: corrupted GC update\n");
		spin_unlock(&gc_line->lock);

		ret = 0;
		goto out;
	}

	pblk_trans_map_set(pblk, lba, ppa_new);
out:
	spin_unlock(&pblk->trans_lock);
	return ret;
}

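/*
 * L2P update on write completion: replace a cacheline address with the
 * device address it was persisted to, but only if the L2P entry still
 * points at the cacheline that was written; if the host has updated the
 * entry in the meantime, the freshly written device address is
 * invalidated instead.
 */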
void pblk_update_map_dev(struct pblk *pblk, sector_t lba,
			 struct ppa_addr ppa_mapped, struct ppa_addr ppa_cache)
{
	struct ppa_addr ppa_l2p;

#ifdef CONFIG_NVM_DEBUG
	/* Callers must ensure that the ppa points to a device address */
	BUG_ON(pblk_addr_in_cache(ppa_mapped));
#endif
	/* Invalidate and discard padded entries */
	if (lba == ADDR_EMPTY) {
#ifdef CONFIG_NVM_DEBUG
		atomic_long_inc(&pblk->padded_wb);
#endif
		if (!pblk_ppa_empty(ppa_mapped))
			pblk_map_invalidate(pblk, ppa_mapped);
		return;
	}

	/* logic error: lba out-of-bounds. Ignore update */
	if (!(lba < pblk->rl.nr_secs)) {
		WARN(1, "pblk: corrupted L2P map request\n");
		return;
	}

	spin_lock(&pblk->trans_lock);
	ppa_l2p = pblk_trans_map_get(pblk, lba);

	/* Do not update L2P if the cacheline has been updated. In this case,
	 * the mapped ppa must be invalidated
	 */
	if (!pblk_ppa_comp(ppa_l2p, ppa_cache)) {
		if (!pblk_ppa_empty(ppa_mapped))
			pblk_map_invalidate(pblk, ppa_mapped);
		goto out;
	}

#ifdef CONFIG_NVM_DEBUG
	WARN_ON(!pblk_addr_in_cache(ppa_l2p) && !pblk_ppa_empty(ppa_l2p));
#endif

	pblk_trans_map_set(pblk, lba, ppa_mapped);
out:
	spin_unlock(&pblk->trans_lock);
}

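/*
 * Look up nr_secs consecutive lbas starting at blba under trans_lock.
 * For every entry that maps to a device address, a line reference is
 * taken so the line cannot be recycled while the read is in flight; the
 * matching kref_put() is the responsibility of the read path (see
 * pblk_line_put_wq() above).
 */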
void pblk_lookup_l2p_seq(struct pblk *pblk, struct ppa_addr *ppas,
			 sector_t blba, int nr_secs)
{
	int i;

	spin_lock(&pblk->trans_lock);
	for (i = 0; i < nr_secs; i++) {
		struct ppa_addr ppa;

		ppa = ppas[i] = pblk_trans_map_get(pblk, blba + i);

		/* If the L2P entry maps to a line, the reference is valid */
		if (!pblk_ppa_empty(ppa) && !pblk_addr_in_cache(ppa)) {
			int line_id = pblk_dev_ppa_to_line(ppa);
			struct pblk_line *line = &pblk->lines[line_id];

			kref_get(&line->ref);
		}
	}
	spin_unlock(&pblk->trans_lock);
}

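/*
 * As pblk_lookup_l2p_seq(), but for an explicit list of (possibly
 * non-contiguous) lbas. Entries marked ADDR_EMPTY are skipped, and no
 * line references are taken here.
 */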
void pblk_lookup_l2p_rand(struct pblk *pblk, struct ppa_addr *ppas,
			  u64 *lba_list, int nr_secs)
{
	u64 lba;
	int i;

	spin_lock(&pblk->trans_lock);
	for (i = 0; i < nr_secs; i++) {
		lba = lba_list[i];
		if (lba != ADDR_EMPTY) {
			/* logic error: lba out-of-bounds. Ignore this entry */
			if (!(lba < pblk->rl.nr_secs)) {
				WARN(1, "pblk: corrupted L2P map request\n");
				continue;
			}
			ppas[i] = pblk_trans_map_get(pblk, lba);
		}
	}
	spin_unlock(&pblk->trans_lock);
}