// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2016 CNEX Labs
 * Initial release: Javier Gonzalez <javier@cnexlabs.com>
 *                  Matias Bjorling <matias@cnexlabs.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * pblk-core.c - pblk's core functionality
 *
 */

#define CREATE_TRACE_POINTS

#include "pblk.h"
#include "pblk-trace.h"

static void pblk_line_mark_bb(struct work_struct *work)
{
        struct pblk_line_ws *line_ws = container_of(work, struct pblk_line_ws,
                                                                        ws);
        struct pblk *pblk = line_ws->pblk;
        struct nvm_tgt_dev *dev = pblk->dev;
        struct ppa_addr *ppa = line_ws->priv;
        int ret;

        ret = nvm_set_chunk_meta(dev, ppa, 1, NVM_BLK_T_GRWN_BAD);
        if (ret) {
                struct pblk_line *line;
                int pos;

                line = pblk_ppa_to_line(pblk, *ppa);
                pos = pblk_ppa_to_pos(&dev->geo, *ppa);

                pblk_err(pblk, "failed to mark bb, line:%d, pos:%d\n",
                                line->id, pos);
        }

        kfree(ppa);
        mempool_free(line_ws, &pblk->gen_ws_pool);
}

static void pblk_mark_bb(struct pblk *pblk, struct pblk_line *line,
                         struct ppa_addr ppa_addr)
{
        struct nvm_tgt_dev *dev = pblk->dev;
        struct nvm_geo *geo = &dev->geo;
        struct ppa_addr *ppa;
        int pos = pblk_ppa_to_pos(geo, ppa_addr);

        pblk_debug(pblk, "erase failed: line:%d, pos:%d\n", line->id, pos);
        atomic_long_inc(&pblk->erase_failed);

        atomic_dec(&line->blk_in_line);
        if (test_and_set_bit(pos, line->blk_bitmap))
                pblk_err(pblk, "attempted to erase bb: line:%d, pos:%d\n",
                                                        line->id, pos);

        /* Not necessary to mark bad blocks on 2.0 spec. */
        if (geo->version == NVM_OCSSD_SPEC_20)
                return;

        ppa = kmalloc(sizeof(struct ppa_addr), GFP_ATOMIC);
        if (!ppa)
                return;

        *ppa = ppa_addr;
        pblk_gen_run_ws(pblk, NULL, ppa, pblk_line_mark_bb,
                                                GFP_ATOMIC, pblk->bb_wq);
}

static void __pblk_end_io_erase(struct pblk *pblk, struct nvm_rq *rqd)
{
        struct nvm_tgt_dev *dev = pblk->dev;
        struct nvm_geo *geo = &dev->geo;
        struct nvm_chk_meta *chunk;
        struct pblk_line *line;
        int pos;

        line = pblk_ppa_to_line(pblk, rqd->ppa_addr);
        pos = pblk_ppa_to_pos(geo, rqd->ppa_addr);
        chunk = &line->chks[pos];

        atomic_dec(&line->left_seblks);

        if (rqd->error) {
                trace_pblk_chunk_reset(pblk_disk_name(pblk),
                                &rqd->ppa_addr, PBLK_CHUNK_RESET_FAILED);

                chunk->state = NVM_CHK_ST_OFFLINE;
                pblk_mark_bb(pblk, line, rqd->ppa_addr);
        } else {
                trace_pblk_chunk_reset(pblk_disk_name(pblk),
                                &rqd->ppa_addr, PBLK_CHUNK_RESET_DONE);

                chunk->state = NVM_CHK_ST_FREE;
        }

        trace_pblk_chunk_state(pblk_disk_name(pblk), &rqd->ppa_addr,
                                chunk->state);

        atomic_dec(&pblk->inflight_io);
}

/* Erase completion assumes that only one block is erased at a time */
static void pblk_end_io_erase(struct nvm_rq *rqd)
{
        struct pblk *pblk = rqd->private;

        __pblk_end_io_erase(pblk, rqd);
        mempool_free(rqd, &pblk->e_rq_pool);
}

/*
 * Get information for all chunks from the device.
 *
 * The caller is responsible for freeing (with vfree) the returned structure
 */
struct nvm_chk_meta *pblk_get_chunk_meta(struct pblk *pblk)
{
        struct nvm_tgt_dev *dev = pblk->dev;
        struct nvm_geo *geo = &dev->geo;
        struct nvm_chk_meta *meta;
        struct ppa_addr ppa;
        unsigned long len;
        int ret;

        ppa.ppa = 0;

        len = geo->all_chunks * sizeof(*meta);
        meta = vzalloc(len);
        if (!meta)
                return ERR_PTR(-ENOMEM);

        ret = nvm_get_chunk_meta(dev, ppa, geo->all_chunks, meta);
        if (ret) {
                vfree(meta);
                return ERR_PTR(-EIO);
        }

        return meta;
}

struct nvm_chk_meta *pblk_chunk_get_off(struct pblk *pblk,
                                              struct nvm_chk_meta *meta,
                                              struct ppa_addr ppa)
{
        struct nvm_tgt_dev *dev = pblk->dev;
        struct nvm_geo *geo = &dev->geo;
        int ch_off = ppa.m.grp * geo->num_chk * geo->num_lun;
        int lun_off = ppa.m.pu * geo->num_chk;
        int chk_off = ppa.m.chk;

        return meta + ch_off + lun_off + chk_off;
}

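/*
 * Illustrative usage sketch (added by the editor, not part of the original
 * file): how a caller is expected to pair pblk_get_chunk_meta() with
 * pblk_chunk_get_off() and release the vmalloc'ed array with vfree().
 */
static int __maybe_unused pblk_chunk_meta_example(struct pblk *pblk,
                                                  struct ppa_addr ppa)
{
        struct nvm_chk_meta *meta, *chunk;

        meta = pblk_get_chunk_meta(pblk);
        if (IS_ERR(meta))
                return PTR_ERR(meta);

        /* The array is flat: channel-major, then LUN, then chunk */
        chunk = pblk_chunk_get_off(pblk, meta, ppa);
        pblk_debug(pblk, "chunk state: %d\n", chunk->state);

        vfree(meta);
        return 0;
}
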
void __pblk_map_invalidate(struct pblk *pblk, struct pblk_line *line,
                           u64 paddr)
{
        struct pblk_line_mgmt *l_mg = &pblk->l_mg;
        struct list_head *move_list = NULL;

        /* Lines being reclaimed (GC'ed) cannot be invalidated. Before the L2P
         * table is modified with reclaimed sectors, a check is done to ensure
         * that newer updates are not overwritten.
         */
        spin_lock(&line->lock);
        WARN_ON(line->state == PBLK_LINESTATE_FREE);

        if (test_and_set_bit(paddr, line->invalid_bitmap)) {
                WARN_ONCE(1, "pblk: double invalidate\n");
                spin_unlock(&line->lock);
                return;
        }
        le32_add_cpu(line->vsc, -1);

        if (line->state == PBLK_LINESTATE_CLOSED)
                move_list = pblk_line_gc_list(pblk, line);
        spin_unlock(&line->lock);

        if (move_list) {
                spin_lock(&l_mg->gc_lock);
                spin_lock(&line->lock);
                /* Prevent moving a line that has just been chosen for GC */
                if (line->state == PBLK_LINESTATE_GC) {
                        spin_unlock(&line->lock);
                        spin_unlock(&l_mg->gc_lock);
                        return;
                }
                spin_unlock(&line->lock);

                list_move_tail(&line->list, move_list);
                spin_unlock(&l_mg->gc_lock);
        }
}

void pblk_map_invalidate(struct pblk *pblk, struct ppa_addr ppa)
{
        struct pblk_line *line;
        u64 paddr;

#ifdef CONFIG_NVM_PBLK_DEBUG
        /* Callers must ensure that the ppa points to a device address */
        BUG_ON(pblk_addr_in_cache(ppa));
        BUG_ON(pblk_ppa_empty(ppa));
#endif

        line = pblk_ppa_to_line(pblk, ppa);
        paddr = pblk_dev_ppa_to_line_addr(pblk, ppa);

        __pblk_map_invalidate(pblk, line, paddr);
}

static void pblk_invalidate_range(struct pblk *pblk, sector_t slba,
                                  unsigned int nr_secs)
{
        sector_t lba;

        spin_lock(&pblk->trans_lock);
        for (lba = slba; lba < slba + nr_secs; lba++) {
                struct ppa_addr ppa;

                ppa = pblk_trans_map_get(pblk, lba);

                if (!pblk_addr_in_cache(ppa) && !pblk_ppa_empty(ppa))
                        pblk_map_invalidate(pblk, ppa);

                pblk_ppa_set_empty(&ppa);
                pblk_trans_map_set(pblk, lba, ppa);
        }
        spin_unlock(&pblk->trans_lock);
}

int pblk_alloc_rqd_meta(struct pblk *pblk, struct nvm_rq *rqd)
{
        struct nvm_tgt_dev *dev = pblk->dev;

        rqd->meta_list = nvm_dev_dma_alloc(dev->parent, GFP_KERNEL,
                                                        &rqd->dma_meta_list);
        if (!rqd->meta_list)
                return -ENOMEM;

        if (rqd->nr_ppas == 1)
                return 0;

        rqd->ppa_list = rqd->meta_list + pblk_dma_meta_size(pblk);
        rqd->dma_ppa_list = rqd->dma_meta_list + pblk_dma_meta_size(pblk);

        return 0;
}

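/*
 * Layout note (added for clarity): pblk_alloc_rqd_meta() carves both buffers
 * out of one DMA allocation. For a multi-sector request:
 *
 *   meta_list ---> +---------------------------+ <--- dma_meta_list
 *                  | per-sector OOB metadata   |
 *   ppa_list ----> +---------------------------+ <--- dma_ppa_list
 *                  | one PPA per sector        |
 *                  +---------------------------+
 *
 * Single-sector requests use rqd->ppa_addr directly and never touch the
 * PPA list.
 */
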
void pblk_free_rqd_meta(struct pblk *pblk, struct nvm_rq *rqd)
{
        struct nvm_tgt_dev *dev = pblk->dev;

        if (rqd->meta_list)
                nvm_dev_dma_free(dev->parent, rqd->meta_list,
                                rqd->dma_meta_list);
}

/* Caller must guarantee that the request is a valid type */
struct nvm_rq *pblk_alloc_rqd(struct pblk *pblk, int type)
{
        mempool_t *pool;
        struct nvm_rq *rqd;
        int rq_size;

        switch (type) {
        case PBLK_WRITE:
        case PBLK_WRITE_INT:
                pool = &pblk->w_rq_pool;
                rq_size = pblk_w_rq_size;
                break;
        case PBLK_READ:
                pool = &pblk->r_rq_pool;
                rq_size = pblk_g_rq_size;
                break;
        default:
                pool = &pblk->e_rq_pool;
                rq_size = pblk_g_rq_size;
        }

        rqd = mempool_alloc(pool, GFP_KERNEL);
        memset(rqd, 0, rq_size);

        return rqd;
}

/* Typically used on completion path. Cannot guarantee request consistency */
void pblk_free_rqd(struct pblk *pblk, struct nvm_rq *rqd, int type)
{
        mempool_t *pool;

        switch (type) {
        case PBLK_WRITE:
                kfree(((struct pblk_c_ctx *)nvm_rq_to_pdu(rqd))->lun_bitmap);
                /* fall through */
        case PBLK_WRITE_INT:
                pool = &pblk->w_rq_pool;
                break;
        case PBLK_READ:
                pool = &pblk->r_rq_pool;
                break;
        case PBLK_ERASE:
                pool = &pblk->e_rq_pool;
                break;
        default:
                pblk_err(pblk, "trying to free unknown rqd type\n");
                return;
        }

        pblk_free_rqd_meta(pblk, rqd);
        mempool_free(rqd, pool);
}

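/*
 * Illustrative usage sketch (added by the editor, not part of the original
 * file): the expected pairing of pblk_alloc_rqd() and pblk_free_rqd(). The
 * type passed on free must match the one used on allocation so the request
 * returns to the right mempool.
 */
static int __maybe_unused pblk_rqd_example(struct pblk *pblk,
                                           struct ppa_addr ppa)
{
        struct nvm_rq *rqd;
        int ret;

        rqd = pblk_alloc_rqd(pblk, PBLK_ERASE);

        /* Minimal single-chunk erase setup; line accounting omitted */
        rqd->opcode = NVM_OP_ERASE;
        rqd->ppa_addr = ppa;
        rqd->nr_ppas = 1;

        ret = pblk_submit_io_sync(pblk, rqd);

        pblk_free_rqd(pblk, rqd, PBLK_ERASE);
        return ret;
}
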
void pblk_bio_free_pages(struct pblk *pblk, struct bio *bio, int off,
                         int nr_pages)
{
        struct bio_vec bv;
        int i;

        WARN_ON(off + nr_pages != bio->bi_vcnt);

        for (i = off; i < nr_pages + off; i++) {
                bv = bio->bi_io_vec[i];
                mempool_free(bv.bv_page, &pblk->page_bio_pool);
        }
}

int pblk_bio_add_pages(struct pblk *pblk, struct bio *bio, gfp_t flags,
                       int nr_pages)
{
        struct request_queue *q = pblk->dev->q;
        struct page *page;
        int i, ret;

        for (i = 0; i < nr_pages; i++) {
                page = mempool_alloc(&pblk->page_bio_pool, flags);

                ret = bio_add_pc_page(q, bio, page, PBLK_EXPOSED_PAGE_SIZE, 0);
                if (ret != PBLK_EXPOSED_PAGE_SIZE) {
                        pblk_err(pblk, "could not add page to bio\n");
                        mempool_free(page, &pblk->page_bio_pool);
                        goto err;
                }
        }

        return 0;
err:
        pblk_bio_free_pages(pblk, bio, (bio->bi_vcnt - i), i);
        return -1;
}

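/*
 * Note (added for clarity): pblk_bio_add_pages() and pblk_bio_free_pages()
 * are paired around pblk's page_bio_pool. On partial failure the error path
 * above returns the pages added so far; a caller that tears down a fully
 * built bio later is expected to call pblk_bio_free_pages() over the same
 * range itself.
 */
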
void pblk_write_kick(struct pblk *pblk)
{
        wake_up_process(pblk->writer_ts);
        mod_timer(&pblk->wtimer, jiffies + msecs_to_jiffies(1000));
}

void pblk_write_timer_fn(struct timer_list *t)
{
        struct pblk *pblk = from_timer(pblk, t, wtimer);

        /* kick the write thread every tick to flush outstanding data */
        pblk_write_kick(pblk);
}

void pblk_write_should_kick(struct pblk *pblk)
{
        unsigned int secs_avail = pblk_rb_read_count(&pblk->rwb);

        if (secs_avail >= pblk->min_write_pgs_data)
                pblk_write_kick(pblk);
}

static void pblk_wait_for_meta(struct pblk *pblk)
{
        do {
                if (!atomic_read(&pblk->inflight_io))
                        break;

                schedule();
        } while (1);
}

static void pblk_flush_writer(struct pblk *pblk)
{
        pblk_rb_flush(&pblk->rwb);
        do {
                if (!pblk_rb_sync_count(&pblk->rwb))
                        break;

                pblk_write_kick(pblk);
                schedule();
        } while (1);
}

struct list_head *pblk_line_gc_list(struct pblk *pblk, struct pblk_line *line)
{
        struct pblk_line_meta *lm = &pblk->lm;
        struct pblk_line_mgmt *l_mg = &pblk->l_mg;
        struct list_head *move_list = NULL;
        int packed_meta = (le32_to_cpu(*line->vsc) / pblk->min_write_pgs_data)
                        * (pblk->min_write_pgs - pblk->min_write_pgs_data);
        int vsc = le32_to_cpu(*line->vsc) + packed_meta;

        lockdep_assert_held(&line->lock);

        if (line->w_err_gc->has_write_err) {
                if (line->gc_group != PBLK_LINEGC_WERR) {
                        line->gc_group = PBLK_LINEGC_WERR;
                        move_list = &l_mg->gc_werr_list;
                        pblk_rl_werr_line_in(&pblk->rl);
                }
        } else if (!vsc) {
                if (line->gc_group != PBLK_LINEGC_FULL) {
                        line->gc_group = PBLK_LINEGC_FULL;
                        move_list = &l_mg->gc_full_list;
                }
        } else if (vsc < lm->high_thrs) {
                if (line->gc_group != PBLK_LINEGC_HIGH) {
                        line->gc_group = PBLK_LINEGC_HIGH;
                        move_list = &l_mg->gc_high_list;
                }
        } else if (vsc < lm->mid_thrs) {
                if (line->gc_group != PBLK_LINEGC_MID) {
                        line->gc_group = PBLK_LINEGC_MID;
                        move_list = &l_mg->gc_mid_list;
                }
        } else if (vsc < line->sec_in_line) {
                if (line->gc_group != PBLK_LINEGC_LOW) {
                        line->gc_group = PBLK_LINEGC_LOW;
                        move_list = &l_mg->gc_low_list;
                }
        } else if (vsc == line->sec_in_line) {
                if (line->gc_group != PBLK_LINEGC_EMPTY) {
                        line->gc_group = PBLK_LINEGC_EMPTY;
                        move_list = &l_mg->gc_empty_list;
                }
        } else {
                line->state = PBLK_LINESTATE_CORRUPT;
                trace_pblk_line_state(pblk_disk_name(pblk), line->id,
                                        line->state);

                line->gc_group = PBLK_LINEGC_NONE;
                move_list = &l_mg->corrupt_list;
                pblk_err(pblk, "corrupted vsc for line %d, vsc:%d (%d/%d/%d)\n",
                                                line->id, vsc,
                                                line->sec_in_line,
                                                lm->high_thrs, lm->mid_thrs);
        }

        return move_list;
}

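/*
 * Summary (added for clarity) of the classification above, keyed on the
 * packed-meta-adjusted count of valid sectors (vsc) in the line:
 *
 *   write error        -> gc_werr_list  (reclaim with highest priority)
 *   vsc == 0           -> gc_full_list  (nothing valid; cheapest reclaim)
 *   vsc <  high_thrs   -> gc_high_list
 *   vsc <  mid_thrs    -> gc_mid_list
 *   vsc <  sec_in_line -> gc_low_list
 *   vsc == sec_in_line -> gc_empty_list (everything still valid)
 *   vsc >  sec_in_line -> corrupt_list  (accounting error)
 */
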
void pblk_discard(struct pblk *pblk, struct bio *bio)
{
        sector_t slba = pblk_get_lba(bio);
        sector_t nr_secs = pblk_get_secs(bio);

        pblk_invalidate_range(pblk, slba, nr_secs);
}

void pblk_log_write_err(struct pblk *pblk, struct nvm_rq *rqd)
{
        atomic_long_inc(&pblk->write_failed);
#ifdef CONFIG_NVM_PBLK_DEBUG
        pblk_print_failed_rqd(pblk, rqd, rqd->error);
#endif
}

void pblk_log_read_err(struct pblk *pblk, struct nvm_rq *rqd)
{
        /* Empty page read is not necessarily an error (e.g., L2P recovery) */
        if (rqd->error == NVM_RSP_ERR_EMPTYPAGE) {
                atomic_long_inc(&pblk->read_empty);
                return;
        }

        switch (rqd->error) {
        case NVM_RSP_WARN_HIGHECC:
                atomic_long_inc(&pblk->read_high_ecc);
                break;
        case NVM_RSP_ERR_FAILECC:
        case NVM_RSP_ERR_FAILCRC:
                atomic_long_inc(&pblk->read_failed);
                break;
        default:
                pblk_err(pblk, "unknown read error:%d\n", rqd->error);
        }
#ifdef CONFIG_NVM_PBLK_DEBUG
        pblk_print_failed_rqd(pblk, rqd, rqd->error);
#endif
}

void pblk_set_sec_per_write(struct pblk *pblk, int sec_per_write)
{
        pblk->sec_per_write = sec_per_write;
}

int pblk_submit_io(struct pblk *pblk, struct nvm_rq *rqd)
{
        struct nvm_tgt_dev *dev = pblk->dev;

        atomic_inc(&pblk->inflight_io);

#ifdef CONFIG_NVM_PBLK_DEBUG
        if (pblk_check_io(pblk, rqd))
                return NVM_IO_ERR;
#endif

        return nvm_submit_io(dev, rqd);
}

void pblk_check_chunk_state_update(struct pblk *pblk, struct nvm_rq *rqd)
{
        struct ppa_addr *ppa_list = nvm_rq_to_ppa_list(rqd);
        int i;

        for (i = 0; i < rqd->nr_ppas; i++) {
                struct ppa_addr *ppa = &ppa_list[i];
                struct nvm_chk_meta *chunk = pblk_dev_ppa_to_chunk(pblk, *ppa);
                u64 caddr = pblk_dev_ppa_to_chunk_addr(pblk, *ppa);

                if (caddr == 0)
                        trace_pblk_chunk_state(pblk_disk_name(pblk),
                                                        ppa, NVM_CHK_ST_OPEN);
                else if (caddr == (chunk->cnlb - 1))
                        trace_pblk_chunk_state(pblk_disk_name(pblk),
                                                        ppa, NVM_CHK_ST_CLOSED);
        }
}

int pblk_submit_io_sync(struct pblk *pblk, struct nvm_rq *rqd)
{
        struct nvm_tgt_dev *dev = pblk->dev;
        int ret;

        atomic_inc(&pblk->inflight_io);

#ifdef CONFIG_NVM_PBLK_DEBUG
        if (pblk_check_io(pblk, rqd))
                return NVM_IO_ERR;
#endif

        ret = nvm_submit_io_sync(dev, rqd);

        if (trace_pblk_chunk_state_enabled() && !ret &&
            rqd->opcode == NVM_OP_PWRITE)
                pblk_check_chunk_state_update(pblk, rqd);

        return ret;
}

int pblk_submit_io_sync_sem(struct pblk *pblk, struct nvm_rq *rqd)
{
        struct ppa_addr *ppa_list;
        int ret;

        ppa_list = (rqd->nr_ppas > 1) ? rqd->ppa_list : &rqd->ppa_addr;

        pblk_down_chunk(pblk, ppa_list[0]);
        ret = pblk_submit_io_sync(pblk, rqd);
        pblk_up_chunk(pblk, ppa_list[0]);

        return ret;
}

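/*
 * Note (added for clarity): pblk_submit_io_sync_sem() takes the write
 * semaphore for the first PPA's chunk only, which assumes that all PPAs in
 * the request target the same chunk; this holds for the smeta writes that
 * are issued through it in this file.
 */
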
static void pblk_bio_map_addr_endio(struct bio *bio)
{
        bio_put(bio);
}

struct bio *pblk_bio_map_addr(struct pblk *pblk, void *data,
                              unsigned int nr_secs, unsigned int len,
                              int alloc_type, gfp_t gfp_mask)
{
        struct nvm_tgt_dev *dev = pblk->dev;
        void *kaddr = data;
        struct page *page;
        struct bio *bio;
        int i, ret;

        if (alloc_type == PBLK_KMALLOC_META)
                return bio_map_kern(dev->q, kaddr, len, gfp_mask);

        bio = bio_kmalloc(gfp_mask, nr_secs);
        if (!bio)
                return ERR_PTR(-ENOMEM);

        for (i = 0; i < nr_secs; i++) {
                page = vmalloc_to_page(kaddr);
                if (!page) {
                        pblk_err(pblk, "could not map vmalloc bio\n");
                        bio_put(bio);
                        bio = ERR_PTR(-ENOMEM);
                        goto out;
                }

                ret = bio_add_pc_page(dev->q, bio, page, PAGE_SIZE, 0);
                if (ret != PAGE_SIZE) {
                        pblk_err(pblk, "could not add page to bio\n");
                        bio_put(bio);
                        bio = ERR_PTR(-ENOMEM);
                        goto out;
                }

                kaddr += PAGE_SIZE;
        }

        bio->bi_end_io = pblk_bio_map_addr_endio;
out:
        return bio;
}

int pblk_calc_secs(struct pblk *pblk, unsigned long secs_avail,
                   unsigned long secs_to_flush, bool skip_meta)
{
        int max = pblk->sec_per_write;
        int min = pblk->min_write_pgs;
        int secs_to_sync = 0;

        if (skip_meta && pblk->min_write_pgs_data != pblk->min_write_pgs)
                min = max = pblk->min_write_pgs_data;

        if (secs_avail >= max)
                secs_to_sync = max;
        else if (secs_avail >= min)
                secs_to_sync = min * (secs_avail / min);
        else if (secs_to_flush)
                secs_to_sync = min;

        return secs_to_sync;
}

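/*
 * Worked example (added for clarity, values illustrative): with min = 4
 * and max = 8, pblk_calc_secs() rounds as follows:
 *
 *   secs_avail = 13, secs_to_flush = 0  ->  8 (capped at max)
 *   secs_avail =  6, secs_to_flush = 0  ->  4 (rounded down to a
 *                                              multiple of min)
 *   secs_avail =  3, secs_to_flush = 0  ->  0 (wait for more data)
 *   secs_avail =  3, secs_to_flush = 1  ->  4 (a flush forces a minimal,
 *                                              later padded, write)
 */
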
void pblk_dealloc_page(struct pblk *pblk, struct pblk_line *line, int nr_secs)
{
        u64 addr;
        int i;

        spin_lock(&line->lock);
        addr = find_next_zero_bit(line->map_bitmap,
                                        pblk->lm.sec_per_line, line->cur_sec);
        line->cur_sec = addr - nr_secs;

        for (i = 0; i < nr_secs; i++, line->cur_sec--)
                WARN_ON(!test_and_clear_bit(line->cur_sec, line->map_bitmap));
        spin_unlock(&line->lock);
}

u64 __pblk_alloc_page(struct pblk *pblk, struct pblk_line *line, int nr_secs)
{
        u64 addr;
        int i;

        lockdep_assert_held(&line->lock);

        /* Logic error: ppa out of bounds. Prevent generating a bad address */
        if (line->cur_sec + nr_secs > pblk->lm.sec_per_line) {
                WARN(1, "pblk: page allocation out of bounds\n");
                nr_secs = pblk->lm.sec_per_line - line->cur_sec;
        }

        line->cur_sec = addr = find_next_zero_bit(line->map_bitmap,
                                        pblk->lm.sec_per_line, line->cur_sec);
        for (i = 0; i < nr_secs; i++, line->cur_sec++)
                WARN_ON(test_and_set_bit(line->cur_sec, line->map_bitmap));

        return addr;
}

u64 pblk_alloc_page(struct pblk *pblk, struct pblk_line *line, int nr_secs)
{
        u64 addr;

        /* Lock needed in case a write fails and a recovery needs to remap
         * failed write buffer entries
         */
        spin_lock(&line->lock);
        addr = __pblk_alloc_page(pblk, line, nr_secs);
        line->left_msecs -= nr_secs;
        WARN(line->left_msecs < 0, "pblk: page allocation out of bounds\n");
        spin_unlock(&line->lock);

        return addr;
}

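/*
 * Note (added for clarity): __pblk_alloc_page() hands out map_bitmap bits
 * in strictly increasing order starting at cur_sec, so the caller gets
 * nr_secs consecutive in-line sector addresses beginning at the returned
 * addr. pblk_dealloc_page() is the inverse and walks cur_sec back over
 * the same bits.
 */
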
u64 pblk_lookup_page(struct pblk *pblk, struct pblk_line *line)
{
        u64 paddr;

        spin_lock(&line->lock);
        paddr = find_next_zero_bit(line->map_bitmap,
                                        pblk->lm.sec_per_line, line->cur_sec);
        spin_unlock(&line->lock);

        return paddr;
}

u64 pblk_line_smeta_start(struct pblk *pblk, struct pblk_line *line)
{
        struct nvm_tgt_dev *dev = pblk->dev;
        struct nvm_geo *geo = &dev->geo;
        struct pblk_line_meta *lm = &pblk->lm;
        int bit;

        /* This usually only happens on bad lines */
        bit = find_first_zero_bit(line->blk_bitmap, lm->blk_per_line);
        if (bit >= lm->blk_per_line)
                return -1;

        return bit * geo->ws_opt;
}

int pblk_line_smeta_read(struct pblk *pblk, struct pblk_line *line)
{
        struct nvm_tgt_dev *dev = pblk->dev;
        struct pblk_line_meta *lm = &pblk->lm;
        struct bio *bio;
        struct nvm_rq rqd;
        u64 paddr = pblk_line_smeta_start(pblk, line);
        int i, ret;

        memset(&rqd, 0, sizeof(struct nvm_rq));

        ret = pblk_alloc_rqd_meta(pblk, &rqd);
        if (ret)
                return ret;

        bio = bio_map_kern(dev->q, line->smeta, lm->smeta_len, GFP_KERNEL);
        if (IS_ERR(bio)) {
                ret = PTR_ERR(bio);
                goto clear_rqd;
        }

        bio->bi_iter.bi_sector = 0; /* internal bio */
        bio_set_op_attrs(bio, REQ_OP_READ, 0);

        rqd.bio = bio;
        rqd.opcode = NVM_OP_PREAD;
        rqd.nr_ppas = lm->smeta_sec;
        rqd.is_seq = 1;

        for (i = 0; i < lm->smeta_sec; i++, paddr++)
                rqd.ppa_list[i] = addr_to_gen_ppa(pblk, paddr, line->id);

        ret = pblk_submit_io_sync(pblk, &rqd);
        if (ret) {
                pblk_err(pblk, "smeta I/O submission failed: %d\n", ret);
                bio_put(bio);
                goto clear_rqd;
        }

        atomic_dec(&pblk->inflight_io);

        if (rqd.error && rqd.error != NVM_RSP_WARN_HIGHECC) {
                pblk_log_read_err(pblk, &rqd);
                ret = -EIO;
        }

clear_rqd:
        pblk_free_rqd_meta(pblk, &rqd);
        return ret;
}

static int pblk_line_smeta_write(struct pblk *pblk, struct pblk_line *line,
                                 u64 paddr)
{
        struct nvm_tgt_dev *dev = pblk->dev;
        struct pblk_line_meta *lm = &pblk->lm;
        struct bio *bio;
        struct nvm_rq rqd;
        __le64 *lba_list = emeta_to_lbas(pblk, line->emeta->buf);
        __le64 addr_empty = cpu_to_le64(ADDR_EMPTY);
        int i, ret;

        memset(&rqd, 0, sizeof(struct nvm_rq));

        ret = pblk_alloc_rqd_meta(pblk, &rqd);
        if (ret)
                return ret;

        bio = bio_map_kern(dev->q, line->smeta, lm->smeta_len, GFP_KERNEL);
        if (IS_ERR(bio)) {
                ret = PTR_ERR(bio);
                goto clear_rqd;
        }

        bio->bi_iter.bi_sector = 0; /* internal bio */
        bio_set_op_attrs(bio, REQ_OP_WRITE, 0);

        rqd.bio = bio;
        rqd.opcode = NVM_OP_PWRITE;
        rqd.nr_ppas = lm->smeta_sec;
        rqd.is_seq = 1;

        for (i = 0; i < lm->smeta_sec; i++, paddr++) {
                struct pblk_sec_meta *meta = pblk_get_meta(pblk,
                                                           rqd.meta_list, i);

                rqd.ppa_list[i] = addr_to_gen_ppa(pblk, paddr, line->id);
                meta->lba = lba_list[paddr] = addr_empty;
        }

        ret = pblk_submit_io_sync_sem(pblk, &rqd);
        if (ret) {
                pblk_err(pblk, "smeta I/O submission failed: %d\n", ret);
                bio_put(bio);
                goto clear_rqd;
        }

        atomic_dec(&pblk->inflight_io);

        if (rqd.error) {
                pblk_log_write_err(pblk, &rqd);
                ret = -EIO;
        }

clear_rqd:
        pblk_free_rqd_meta(pblk, &rqd);
        return ret;
}

int pblk_line_emeta_read(struct pblk *pblk, struct pblk_line *line,
                         void *emeta_buf)
{
        struct nvm_tgt_dev *dev = pblk->dev;
        struct nvm_geo *geo = &dev->geo;
        struct pblk_line_mgmt *l_mg = &pblk->l_mg;
        struct pblk_line_meta *lm = &pblk->lm;
        void *ppa_list, *meta_list;
        struct bio *bio;
        struct nvm_rq rqd;
        u64 paddr = line->emeta_ssec;
        dma_addr_t dma_ppa_list, dma_meta_list;
        int min = pblk->min_write_pgs;
        int left_ppas = lm->emeta_sec[0];
        int line_id = line->id;
        int rq_ppas, rq_len;
        int i, j;
        int ret;

        meta_list = nvm_dev_dma_alloc(dev->parent, GFP_KERNEL,
                                                        &dma_meta_list);
        if (!meta_list)
                return -ENOMEM;

        ppa_list = meta_list + pblk_dma_meta_size(pblk);
        dma_ppa_list = dma_meta_list + pblk_dma_meta_size(pblk);

next_rq:
        memset(&rqd, 0, sizeof(struct nvm_rq));

        rq_ppas = pblk_calc_secs(pblk, left_ppas, 0, false);
        rq_len = rq_ppas * geo->csecs;

        bio = pblk_bio_map_addr(pblk, emeta_buf, rq_ppas, rq_len,
                                        l_mg->emeta_alloc_type, GFP_KERNEL);
        if (IS_ERR(bio)) {
                ret = PTR_ERR(bio);
                goto free_rqd_dma;
        }

        bio->bi_iter.bi_sector = 0; /* internal bio */
        bio_set_op_attrs(bio, REQ_OP_READ, 0);

        rqd.bio = bio;
        rqd.meta_list = meta_list;
        rqd.ppa_list = ppa_list;
        rqd.dma_meta_list = dma_meta_list;
        rqd.dma_ppa_list = dma_ppa_list;
        rqd.opcode = NVM_OP_PREAD;
        rqd.nr_ppas = rq_ppas;

        for (i = 0; i < rqd.nr_ppas; ) {
                struct ppa_addr ppa = addr_to_gen_ppa(pblk, paddr, line_id);
                int pos = pblk_ppa_to_pos(geo, ppa);

                if (pblk_io_aligned(pblk, rq_ppas))
                        rqd.is_seq = 1;

                while (test_bit(pos, line->blk_bitmap)) {
                        paddr += min;
                        if (pblk_boundary_paddr_checks(pblk, paddr)) {
                                bio_put(bio);
                                ret = -EINTR;
                                goto free_rqd_dma;
                        }

                        ppa = addr_to_gen_ppa(pblk, paddr, line_id);
                        pos = pblk_ppa_to_pos(geo, ppa);
                }

                if (pblk_boundary_paddr_checks(pblk, paddr + min)) {
                        bio_put(bio);
                        ret = -EINTR;
                        goto free_rqd_dma;
                }

                for (j = 0; j < min; j++, i++, paddr++)
                        rqd.ppa_list[i] = addr_to_gen_ppa(pblk, paddr, line_id);
        }

        ret = pblk_submit_io_sync(pblk, &rqd);
        if (ret) {
                pblk_err(pblk, "emeta I/O submission failed: %d\n", ret);
                bio_put(bio);
                goto free_rqd_dma;
        }

        atomic_dec(&pblk->inflight_io);

        if (rqd.error && rqd.error != NVM_RSP_WARN_HIGHECC) {
                pblk_log_read_err(pblk, &rqd);
                ret = -EIO;
                goto free_rqd_dma;
        }

        emeta_buf += rq_len;
        left_ppas -= rq_ppas;
        if (left_ppas)
                goto next_rq;

free_rqd_dma:
        nvm_dev_dma_free(dev->parent, rqd.meta_list, rqd.dma_meta_list);
        return ret;
}

static void pblk_setup_e_rq(struct pblk *pblk, struct nvm_rq *rqd,
                            struct ppa_addr ppa)
{
        rqd->opcode = NVM_OP_ERASE;
        rqd->ppa_addr = ppa;
        rqd->nr_ppas = 1;
        rqd->is_seq = 1;
        rqd->bio = NULL;
}

static int pblk_blk_erase_sync(struct pblk *pblk, struct ppa_addr ppa)
{
        struct nvm_rq rqd = {NULL};
        int ret;

        trace_pblk_chunk_reset(pblk_disk_name(pblk), &ppa,
                                PBLK_CHUNK_RESET_START);

        pblk_setup_e_rq(pblk, &rqd, ppa);

        /* The write thread schedules erases so that it minimizes disturbances
         * with writes. Thus, there is no need to take the LUN semaphore.
         */
        ret = pblk_submit_io_sync(pblk, &rqd);
        rqd.private = pblk;
        __pblk_end_io_erase(pblk, &rqd);

        return ret;
}

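/*
 * Note (added for clarity): the synchronous erase path above completes the
 * request by calling __pblk_end_io_erase() directly on a stack-allocated
 * rqd, while the asynchronous path completes through pblk_end_io_erase(),
 * which additionally returns the rqd to the e_rq_pool mempool.
 */
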
int pblk_line_erase(struct pblk *pblk, struct pblk_line *line)
{
        struct pblk_line_meta *lm = &pblk->lm;
        struct ppa_addr ppa;
        int ret, bit = -1;

        /* Erase only good blocks, one at a time */
        do {
                spin_lock(&line->lock);
                bit = find_next_zero_bit(line->erase_bitmap, lm->blk_per_line,
                                                                bit + 1);
                if (bit >= lm->blk_per_line) {
                        spin_unlock(&line->lock);
                        break;
                }

                ppa = pblk->luns[bit].bppa; /* set ch and lun */
                ppa.a.blk = line->id;

                atomic_dec(&line->left_eblks);
                WARN_ON(test_and_set_bit(bit, line->erase_bitmap));
                spin_unlock(&line->lock);

                ret = pblk_blk_erase_sync(pblk, ppa);
                if (ret) {
                        pblk_err(pblk, "failed to erase line %d\n", line->id);
                        return ret;
                }
        } while (1);

        return 0;
}

static void pblk_line_setup_metadata(struct pblk_line *line,
                                     struct pblk_line_mgmt *l_mg,
                                     struct pblk_line_meta *lm)
{
        int meta_line;

        lockdep_assert_held(&l_mg->free_lock);

retry_meta:
        meta_line = find_first_zero_bit(&l_mg->meta_bitmap, PBLK_DATA_LINES);
        if (meta_line == PBLK_DATA_LINES) {
                spin_unlock(&l_mg->free_lock);
                io_schedule();
                spin_lock(&l_mg->free_lock);
                goto retry_meta;
        }

        set_bit(meta_line, &l_mg->meta_bitmap);
        line->meta_line = meta_line;

        line->smeta = l_mg->sline_meta[meta_line];
        line->emeta = l_mg->eline_meta[meta_line];

        memset(line->smeta, 0, lm->smeta_len);
        memset(line->emeta->buf, 0, lm->emeta_len[0]);

        line->emeta->mem = 0;
        atomic_set(&line->emeta->sync, 0);
}

/* For now, lines are always assumed to be full lines. Thus, the smeta former
 * and current LUN bitmaps are omitted.
 */
static int pblk_line_init_metadata(struct pblk *pblk, struct pblk_line *line,
                                  struct pblk_line *cur)
{
        struct nvm_tgt_dev *dev = pblk->dev;
        struct nvm_geo *geo = &dev->geo;
        struct pblk_line_meta *lm = &pblk->lm;
        struct pblk_line_mgmt *l_mg = &pblk->l_mg;
        struct pblk_emeta *emeta = line->emeta;
        struct line_emeta *emeta_buf = emeta->buf;
        struct line_smeta *smeta_buf = (struct line_smeta *)line->smeta;
        int nr_blk_line;

        /* After erasing the line, new bad blocks might appear and we risk
         * having an invalid line
         */
        nr_blk_line = lm->blk_per_line -
                        bitmap_weight(line->blk_bitmap, lm->blk_per_line);
        if (nr_blk_line < lm->min_blk_line) {
                spin_lock(&l_mg->free_lock);
                spin_lock(&line->lock);
                line->state = PBLK_LINESTATE_BAD;
                trace_pblk_line_state(pblk_disk_name(pblk), line->id,
                                        line->state);
                spin_unlock(&line->lock);

                list_add_tail(&line->list, &l_mg->bad_list);
                spin_unlock(&l_mg->free_lock);

                pblk_debug(pblk, "line %d is bad\n", line->id);

                return 0;
        }

        /* Run-time metadata */
        line->lun_bitmap = ((void *)(smeta_buf)) + sizeof(struct line_smeta);

        /* Mark LUNs allocated in this line (all for now) */
        bitmap_set(line->lun_bitmap, 0, lm->lun_bitmap_len);

        smeta_buf->header.identifier = cpu_to_le32(PBLK_MAGIC);
        guid_copy((guid_t *)&smeta_buf->header.uuid, &pblk->instance_uuid);
        smeta_buf->header.id = cpu_to_le32(line->id);
        smeta_buf->header.type = cpu_to_le16(line->type);
        smeta_buf->header.version_major = SMETA_VERSION_MAJOR;
        smeta_buf->header.version_minor = SMETA_VERSION_MINOR;

        /* Start metadata */
        smeta_buf->seq_nr = cpu_to_le64(line->seq_nr);
        smeta_buf->window_wr_lun = cpu_to_le32(geo->all_luns);

        /* Fill metadata among lines */
        if (cur) {
                memcpy(line->lun_bitmap, cur->lun_bitmap, lm->lun_bitmap_len);
                smeta_buf->prev_id = cpu_to_le32(cur->id);
                cur->emeta->buf->next_id = cpu_to_le32(line->id);
        } else {
                smeta_buf->prev_id = cpu_to_le32(PBLK_LINE_EMPTY);
        }

        /* All smeta must be set at this point */
        smeta_buf->header.crc = cpu_to_le32(
                        pblk_calc_meta_header_crc(pblk, &smeta_buf->header));
        smeta_buf->crc = cpu_to_le32(pblk_calc_smeta_crc(pblk, smeta_buf));

        /* End metadata */
        memcpy(&emeta_buf->header, &smeta_buf->header,
                                                sizeof(struct line_header));

        emeta_buf->header.version_major = EMETA_VERSION_MAJOR;
        emeta_buf->header.version_minor = EMETA_VERSION_MINOR;
        emeta_buf->header.crc = cpu_to_le32(
                        pblk_calc_meta_header_crc(pblk, &emeta_buf->header));

        emeta_buf->seq_nr = cpu_to_le64(line->seq_nr);
        emeta_buf->nr_lbas = cpu_to_le64(line->sec_in_line);
        emeta_buf->nr_valid_lbas = cpu_to_le64(0);
        emeta_buf->next_id = cpu_to_le32(PBLK_LINE_EMPTY);
        emeta_buf->crc = cpu_to_le32(0);
        emeta_buf->prev_id = smeta_buf->prev_id;

        return 1;
}

static int pblk_line_alloc_bitmaps(struct pblk *pblk, struct pblk_line *line)
{
        struct pblk_line_meta *lm = &pblk->lm;
        struct pblk_line_mgmt *l_mg = &pblk->l_mg;

        line->map_bitmap = mempool_alloc(l_mg->bitmap_pool, GFP_KERNEL);
        if (!line->map_bitmap)
                return -ENOMEM;

        memset(line->map_bitmap, 0, lm->sec_bitmap_len);

        /* will be initialized using bb info from map_bitmap */
        line->invalid_bitmap = mempool_alloc(l_mg->bitmap_pool, GFP_KERNEL);
        if (!line->invalid_bitmap) {
                mempool_free(line->map_bitmap, l_mg->bitmap_pool);
                line->map_bitmap = NULL;
                return -ENOMEM;
        }

        return 0;
}

/* Derive the line's sector mapping: fold bad block information into the
 * map bitmaps and reserve the smeta and emeta regions.
 */
static int pblk_line_init_bb(struct pblk *pblk, struct pblk_line *line,
                             int init)
{
        struct nvm_tgt_dev *dev = pblk->dev;
        struct nvm_geo *geo = &dev->geo;
        struct pblk_line_meta *lm = &pblk->lm;
        struct pblk_line_mgmt *l_mg = &pblk->l_mg;
        u64 off;
        int bit = -1;
        int emeta_secs;

        line->sec_in_line = lm->sec_per_line;

        /* Capture bad block information on line mapping bitmaps */
        while ((bit = find_next_bit(line->blk_bitmap, lm->blk_per_line,
                                        bit + 1)) < lm->blk_per_line) {
                off = bit * geo->ws_opt;
                bitmap_shift_left(l_mg->bb_aux, l_mg->bb_template, off,
                                                        lm->sec_per_line);
                bitmap_or(line->map_bitmap, line->map_bitmap, l_mg->bb_aux,
                                                        lm->sec_per_line);
                line->sec_in_line -= geo->clba;
        }

        /* Mark smeta metadata sectors as bad sectors */
        bit = find_first_zero_bit(line->blk_bitmap, lm->blk_per_line);
        off = bit * geo->ws_opt;
        bitmap_set(line->map_bitmap, off, lm->smeta_sec);
        line->sec_in_line -= lm->smeta_sec;
        line->cur_sec = off + lm->smeta_sec;

        if (init && pblk_line_smeta_write(pblk, line, off)) {
                pblk_debug(pblk, "line smeta I/O failed. Retry\n");
                return 0;
        }

        bitmap_copy(line->invalid_bitmap, line->map_bitmap, lm->sec_per_line);

        /* Mark emeta metadata sectors as bad sectors. We need to consider bad
         * blocks to make sure that there are enough sectors to store emeta
         */
        emeta_secs = lm->emeta_sec[0];
        off = lm->sec_per_line;
        while (emeta_secs) {
                off -= geo->ws_opt;
                if (!test_bit(off, line->invalid_bitmap)) {
                        bitmap_set(line->invalid_bitmap, off, geo->ws_opt);
                        emeta_secs -= geo->ws_opt;
                }
        }

        line->emeta_ssec = off;
        line->sec_in_line -= lm->emeta_sec[0];
        line->nr_valid_lbas = 0;
        line->left_msecs = line->sec_in_line;
        *line->vsc = cpu_to_le32(line->sec_in_line);

        if (lm->sec_per_line - line->sec_in_line !=
                bitmap_weight(line->invalid_bitmap, lm->sec_per_line)) {
                spin_lock(&line->lock);
                line->state = PBLK_LINESTATE_BAD;
                trace_pblk_line_state(pblk_disk_name(pblk), line->id,
                                        line->state);
                spin_unlock(&line->lock);

                list_add_tail(&line->list, &l_mg->bad_list);
                pblk_err(pblk, "unexpected line %d is bad\n", line->id);

                return 0;
        }

        return 1;
}

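/*
 * Resulting line layout (added for clarity), in in-line sector order:
 *
 *   [ smeta ][ user data ..................... ][ emeta ]
 *
 * smeta takes lm->smeta_sec sectors at the start of the first good block;
 * emeta takes lm->emeta_sec[0] sectors packed backwards from the end of
 * the line, skipping bad blocks. Both regions are marked in the map and
 * invalid bitmaps so the write path never places user data there.
 */
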
static int pblk_prepare_new_line(struct pblk *pblk, struct pblk_line *line)
{
        struct pblk_line_meta *lm = &pblk->lm;
        struct nvm_tgt_dev *dev = pblk->dev;
        struct nvm_geo *geo = &dev->geo;
        int blk_to_erase = atomic_read(&line->blk_in_line);
        int i;

        for (i = 0; i < lm->blk_per_line; i++) {
                struct pblk_lun *rlun = &pblk->luns[i];
                int pos = pblk_ppa_to_pos(geo, rlun->bppa);
                int state = line->chks[pos].state;

                /* Free chunks should not be erased */
                if (state & NVM_CHK_ST_FREE) {
                        set_bit(pblk_ppa_to_pos(geo, rlun->bppa),
                                                        line->erase_bitmap);
                        blk_to_erase--;
                }
        }

        return blk_to_erase;
}

static int pblk_line_prepare(struct pblk *pblk, struct pblk_line *line)
{
        struct pblk_line_meta *lm = &pblk->lm;
        int blk_in_line = atomic_read(&line->blk_in_line);
        int blk_to_erase;

        /* Bad blocks do not need to be erased */
        bitmap_copy(line->erase_bitmap, line->blk_bitmap, lm->blk_per_line);

        spin_lock(&line->lock);

        /* If we have not written to this line, we need to mark free chunks
         * as already erased
         */
        if (line->state == PBLK_LINESTATE_NEW) {
                blk_to_erase = pblk_prepare_new_line(pblk, line);
                line->state = PBLK_LINESTATE_FREE;
                trace_pblk_line_state(pblk_disk_name(pblk), line->id,
                                        line->state);
        } else {
                blk_to_erase = blk_in_line;
        }

        if (blk_in_line < lm->min_blk_line) {
                spin_unlock(&line->lock);
                return -EAGAIN;
        }

        if (line->state != PBLK_LINESTATE_FREE) {
                WARN(1, "pblk: corrupted line %d, state %d\n",
                                                        line->id, line->state);
                spin_unlock(&line->lock);
                return -EINTR;
        }

        line->state = PBLK_LINESTATE_OPEN;
        trace_pblk_line_state(pblk_disk_name(pblk), line->id,
                                line->state);

        atomic_set(&line->left_eblks, blk_to_erase);
        atomic_set(&line->left_seblks, blk_to_erase);

        line->meta_distance = lm->meta_distance;
        spin_unlock(&line->lock);

        kref_init(&line->ref);
        atomic_set(&line->sec_to_update, 0);

        return 0;
}

/* Line allocations in the recovery path are always single threaded */
int pblk_line_recov_alloc(struct pblk *pblk, struct pblk_line *line)
{
        struct pblk_line_mgmt *l_mg = &pblk->l_mg;
        int ret;

        spin_lock(&l_mg->free_lock);
        l_mg->data_line = line;
        list_del(&line->list);

        ret = pblk_line_prepare(pblk, line);
        if (ret) {
                list_add(&line->list, &l_mg->free_list);
                spin_unlock(&l_mg->free_lock);
                return ret;
        }
        spin_unlock(&l_mg->free_lock);

        ret = pblk_line_alloc_bitmaps(pblk, line);
        if (ret)
                goto fail;

        if (!pblk_line_init_bb(pblk, line, 0)) {
                ret = -EINTR;
                goto fail;
        }

        pblk_rl_free_lines_dec(&pblk->rl, line, true);
        return 0;

fail:
        spin_lock(&l_mg->free_lock);
        list_add(&line->list, &l_mg->free_list);
        spin_unlock(&l_mg->free_lock);

        return ret;
}

void pblk_line_recov_close(struct pblk *pblk, struct pblk_line *line)
{
        struct pblk_line_mgmt *l_mg = &pblk->l_mg;

        mempool_free(line->map_bitmap, l_mg->bitmap_pool);
        line->map_bitmap = NULL;
        line->smeta = NULL;
        line->emeta = NULL;
}

static void pblk_line_reinit(struct pblk_line *line)
{
        *line->vsc = cpu_to_le32(EMPTY_ENTRY);

        line->map_bitmap = NULL;
        line->invalid_bitmap = NULL;
        line->smeta = NULL;
        line->emeta = NULL;
}

void pblk_line_free(struct pblk_line *line)
{
        struct pblk *pblk = line->pblk;
        struct pblk_line_mgmt *l_mg = &pblk->l_mg;

        mempool_free(line->map_bitmap, l_mg->bitmap_pool);
        mempool_free(line->invalid_bitmap, l_mg->bitmap_pool);

        pblk_line_reinit(line);
}

struct pblk_line *pblk_line_get(struct pblk *pblk)
{
        struct pblk_line_mgmt *l_mg = &pblk->l_mg;
        struct pblk_line_meta *lm = &pblk->lm;
        struct pblk_line *line;
        int ret, bit;

        lockdep_assert_held(&l_mg->free_lock);

retry:
        if (list_empty(&l_mg->free_list)) {
                pblk_err(pblk, "no free lines\n");
                return NULL;
        }

        line = list_first_entry(&l_mg->free_list, struct pblk_line, list);
        list_del(&line->list);
        l_mg->nr_free_lines--;

        bit = find_first_zero_bit(line->blk_bitmap, lm->blk_per_line);
        if (unlikely(bit >= lm->blk_per_line)) {
                spin_lock(&line->lock);
                line->state = PBLK_LINESTATE_BAD;
                trace_pblk_line_state(pblk_disk_name(pblk), line->id,
                                        line->state);
                spin_unlock(&line->lock);

                list_add_tail(&line->list, &l_mg->bad_list);

                pblk_debug(pblk, "line %d is bad\n", line->id);
                goto retry;
        }

        ret = pblk_line_prepare(pblk, line);
        if (ret) {
                switch (ret) {
                case -EAGAIN:
                        list_add(&line->list, &l_mg->bad_list);
                        goto retry;
                case -EINTR:
                        list_add(&line->list, &l_mg->corrupt_list);
                        goto retry;
                default:
                        pblk_err(pblk, "failed to prepare line %d\n", line->id);
                        list_add(&line->list, &l_mg->free_list);
                        l_mg->nr_free_lines++;
                        return NULL;
                }
        }

        return line;
}

static struct pblk_line *pblk_line_retry(struct pblk *pblk,
                                         struct pblk_line *line)
{
        struct pblk_line_mgmt *l_mg = &pblk->l_mg;
        struct pblk_line *retry_line;

retry:
        spin_lock(&l_mg->free_lock);
        retry_line = pblk_line_get(pblk);
        if (!retry_line) {
                l_mg->data_line = NULL;
                spin_unlock(&l_mg->free_lock);
                return NULL;
        }

        retry_line->map_bitmap = line->map_bitmap;
        retry_line->invalid_bitmap = line->invalid_bitmap;
        retry_line->smeta = line->smeta;
        retry_line->emeta = line->emeta;
        retry_line->meta_line = line->meta_line;

        pblk_line_reinit(line);

        l_mg->data_line = retry_line;
        spin_unlock(&l_mg->free_lock);

        pblk_rl_free_lines_dec(&pblk->rl, line, false);

        if (pblk_line_erase(pblk, retry_line))
                goto retry;

        return retry_line;
}

static void pblk_set_space_limit(struct pblk *pblk)
{
        struct pblk_rl *rl = &pblk->rl;

        atomic_set(&rl->rb_space, 0);
}

struct pblk_line *pblk_line_get_first_data(struct pblk *pblk)
{
        struct pblk_line_mgmt *l_mg = &pblk->l_mg;
        struct pblk_line *line;

        spin_lock(&l_mg->free_lock);
        line = pblk_line_get(pblk);
        if (!line) {
                spin_unlock(&l_mg->free_lock);
                return NULL;
        }

        line->seq_nr = l_mg->d_seq_nr++;
        line->type = PBLK_LINETYPE_DATA;
        l_mg->data_line = line;

        pblk_line_setup_metadata(line, l_mg, &pblk->lm);

        /* Allocate next line for preparation */
        l_mg->data_next = pblk_line_get(pblk);
        if (!l_mg->data_next) {
                /* If we cannot get a new line, we need to stop the pipeline.
                 * Only allow as many writes in as we can store safely and then
                 * fail gracefully
                 */
                pblk_set_space_limit(pblk);

                l_mg->data_next = NULL;
        } else {
                l_mg->data_next->seq_nr = l_mg->d_seq_nr++;
                l_mg->data_next->type = PBLK_LINETYPE_DATA;
        }
        spin_unlock(&l_mg->free_lock);

        if (pblk_line_alloc_bitmaps(pblk, line))
                return NULL;

        if (pblk_line_erase(pblk, line)) {
                line = pblk_line_retry(pblk, line);
                if (!line)
                        return NULL;
        }

retry_setup:
        if (!pblk_line_init_metadata(pblk, line, NULL)) {
                line = pblk_line_retry(pblk, line);
                if (!line)
                        return NULL;

                goto retry_setup;
        }

        if (!pblk_line_init_bb(pblk, line, 1)) {
                line = pblk_line_retry(pblk, line);
                if (!line)
                        return NULL;

                goto retry_setup;
        }

        pblk_rl_free_lines_dec(&pblk->rl, line, true);

        return line;
}

1518 void pblk_ppa_to_line_put(struct pblk *pblk, struct ppa_addr ppa)
1519 {
1520         struct pblk_line *line;
1521
1522         line = pblk_ppa_to_line(pblk, ppa);
1523         kref_put(&line->ref, pblk_line_put_wq);
1524 }
1525
1526 void pblk_rq_to_line_put(struct pblk *pblk, struct nvm_rq *rqd)
1527 {
1528         struct ppa_addr *ppa_list;
1529         int i;
1530
1531         ppa_list = (rqd->nr_ppas > 1) ? rqd->ppa_list : &rqd->ppa_addr;
1532
1533         for (i = 0; i < rqd->nr_ppas; i++)
1534                 pblk_ppa_to_line_put(pblk, ppa_list[i]);
1535 }
1536
1537 static void pblk_stop_writes(struct pblk *pblk, struct pblk_line *line)
1538 {
1539         lockdep_assert_held(&pblk->l_mg.free_lock);
1540
1541         pblk_set_space_limit(pblk);
1542         pblk->state = PBLK_STATE_STOPPING;
1543         trace_pblk_state(pblk_disk_name(pblk), pblk->state);
1544 }
1545
1546 static void pblk_line_close_meta_sync(struct pblk *pblk)
1547 {
1548         struct pblk_line_mgmt *l_mg = &pblk->l_mg;
1549         struct pblk_line_meta *lm = &pblk->lm;
1550         struct pblk_line *line, *tline;
1551         LIST_HEAD(list);
1552
1553         spin_lock(&l_mg->close_lock);
1554         if (list_empty(&l_mg->emeta_list)) {
1555                 spin_unlock(&l_mg->close_lock);
1556                 return;
1557         }
1558
1559         list_cut_position(&list, &l_mg->emeta_list, l_mg->emeta_list.prev);
1560         spin_unlock(&l_mg->close_lock);
1561
1562         list_for_each_entry_safe(line, tline, &list, list) {
1563                 struct pblk_emeta *emeta = line->emeta;
1564
1565                 while (emeta->mem < lm->emeta_len[0]) {
1566                         int ret;
1567
1568                         ret = pblk_submit_meta_io(pblk, line);
1569                         if (ret) {
1570                                 pblk_err(pblk, "sync meta line %d failed (%d)\n",
1571                                                         line->id, ret);
1572                                 return;
1573                         }
1574                 }
1575         }
1576
1577         pblk_wait_for_meta(pblk);
1578         flush_workqueue(pblk->close_wq);
1579 }
1580
1581 void __pblk_pipeline_flush(struct pblk *pblk)
1582 {
1583         struct pblk_line_mgmt *l_mg = &pblk->l_mg;
1584         int ret;
1585
1586         spin_lock(&l_mg->free_lock);
1587         if (pblk->state == PBLK_STATE_RECOVERING ||
1588                                         pblk->state == PBLK_STATE_STOPPED) {
1589                 spin_unlock(&l_mg->free_lock);
1590                 return;
1591         }
1592         pblk->state = PBLK_STATE_RECOVERING;
1593         trace_pblk_state(pblk_disk_name(pblk), pblk->state);
1594         spin_unlock(&l_mg->free_lock);
1595
1596         pblk_flush_writer(pblk);
1597         pblk_wait_for_meta(pblk);
1598
1599         ret = pblk_recov_pad(pblk);
1600         if (ret) {
1601                 pblk_err(pblk, "could not close data on teardown (%d)\n", ret);
1602                 return;
1603         }
1604
1605         flush_workqueue(pblk->bb_wq);
1606         pblk_line_close_meta_sync(pblk);
1607 }
1608
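/* Mark the instance as stopped and detach the active data lines. Callers
 * are expected to flush the pipeline first (see pblk_pipeline_stop()).
 */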
1609 void __pblk_pipeline_stop(struct pblk *pblk)
1610 {
1611         struct pblk_line_mgmt *l_mg = &pblk->l_mg;
1612
1613         spin_lock(&l_mg->free_lock);
1614         pblk->state = PBLK_STATE_STOPPED;
1615         trace_pblk_state(pblk_disk_name(pblk), pblk->state);
1616         l_mg->data_line = NULL;
1617         l_mg->data_next = NULL;
1618         spin_unlock(&l_mg->free_lock);
1619 }
1620
1621 void pblk_pipeline_stop(struct pblk *pblk)
1622 {
1623         __pblk_pipeline_flush(pblk);
1624         __pblk_pipeline_stop(pblk);
1625 }
1626
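/* Promote l_mg->data_next to the active data line and prepare a new
 * data_next. Returns the new data line, or NULL if none could be set up.
 */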
1627 struct pblk_line *pblk_line_replace_data(struct pblk *pblk)
1628 {
1629         struct pblk_line_mgmt *l_mg = &pblk->l_mg;
1630         struct pblk_line *cur, *new = NULL;
1631         unsigned int left_seblks;
1632
1633         new = l_mg->data_next;
1634         if (!new)
1635                 goto out;
1636
1637         spin_lock(&l_mg->free_lock);
1638         cur = l_mg->data_line;
1639         l_mg->data_line = new;
1640
1641         pblk_line_setup_metadata(new, l_mg, &pblk->lm);
1642         spin_unlock(&l_mg->free_lock);
1643
1644 retry_erase:
1645         left_seblks = atomic_read(&new->left_seblks);
1646         if (left_seblks) {
1647                         /* If blocks remain to be erased, submit the erases */
1648                 if (atomic_read(&new->left_eblks)) {
1649                         if (pblk_line_erase(pblk, new))
1650                                 goto out;
1651                 } else {
1652                         io_schedule();
1653                 }
1654                 goto retry_erase;
1655         }
1656
1657         if (pblk_line_alloc_bitmaps(pblk, new))
1658                 return NULL;
1659
1660 retry_setup:
1661         if (!pblk_line_init_metadata(pblk, new, cur)) {
1662                 new = pblk_line_retry(pblk, new);
1663                 if (!new)
1664                         goto out;
1665
1666                 goto retry_setup;
1667         }
1668
1669         if (!pblk_line_init_bb(pblk, new, 1)) {
1670                 new = pblk_line_retry(pblk, new);
1671                 if (!new)
1672                         goto out;
1673
1674                 goto retry_setup;
1675         }
1676
1677         pblk_rl_free_lines_dec(&pblk->rl, new, true);
1678
1679         /* Allocate next line for preparation */
1680         spin_lock(&l_mg->free_lock);
1681         l_mg->data_next = pblk_line_get(pblk);
1682         if (!l_mg->data_next) {
1683                 /* If we cannot get a new line, we need to stop the pipeline.
1684                  * Only allow as many writes in as we can store safely and then
1685                  * fail gracefully
1686                  */
1687                 pblk_stop_writes(pblk, new);
1688                 l_mg->data_next = NULL;
1689         } else {
1690                 l_mg->data_next->seq_nr = l_mg->d_seq_nr++;
1691                 l_mg->data_next->type = PBLK_LINETYPE_DATA;
1692         }
1693         spin_unlock(&l_mg->free_lock);
1694
1695 out:
1696         return new;
1697 }
1698
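/* Last reference to a line under GC has been dropped: if GC hit an error
 * on the line, hand it back via pblk_put_line_back() so GC can retry it;
 * otherwise free it and return it to the free list.
 */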
1699 static void __pblk_line_put(struct pblk *pblk, struct pblk_line *line)
1700 {
1701         struct pblk_line_mgmt *l_mg = &pblk->l_mg;
1702         struct pblk_gc *gc = &pblk->gc;
1703
1704         spin_lock(&line->lock);
1705         WARN_ON(line->state != PBLK_LINESTATE_GC);
1706         if (line->w_err_gc->has_gc_err) {
1707                 spin_unlock(&line->lock);
1708                 pblk_err(pblk, "line %d had errors during GC\n", line->id);
1709                 pblk_put_line_back(pblk, line);
1710                 line->w_err_gc->has_gc_err = 0;
1711                 return;
1712         }
1713
1714         line->state = PBLK_LINESTATE_FREE;
1715         trace_pblk_line_state(pblk_disk_name(pblk), line->id,
1716                                         line->state);
1717         line->gc_group = PBLK_LINEGC_NONE;
1718         pblk_line_free(line);
1719
1720         if (line->w_err_gc->has_write_err) {
1721                 pblk_rl_werr_line_out(&pblk->rl);
1722                 line->w_err_gc->has_write_err = 0;
1723         }
1724
1725         spin_unlock(&line->lock);
1726         atomic_dec(&gc->pipeline_gc);
1727
1728         spin_lock(&l_mg->free_lock);
1729         list_add_tail(&line->list, &l_mg->free_list);
1730         l_mg->nr_free_lines++;
1731         spin_unlock(&l_mg->free_lock);
1732
1733         pblk_rl_free_lines_inc(&pblk->rl, line);
1734 }
1735
1736 static void pblk_line_put_ws(struct work_struct *work)
1737 {
1738         struct pblk_line_ws *line_put_ws = container_of(work,
1739                                                 struct pblk_line_ws, ws);
1740         struct pblk *pblk = line_put_ws->pblk;
1741         struct pblk_line *line = line_put_ws->line;
1742
1743         __pblk_line_put(pblk, line);
1744         mempool_free(line_put_ws, &pblk->gen_ws_pool);
1745 }
1746
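/* kref release callbacks: pblk_line_put() frees the line in the caller's
 * context, while pblk_line_put_wq() defers the put to a workqueue for
 * callers in atomic context.
 */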
1747 void pblk_line_put(struct kref *ref)
1748 {
1749         struct pblk_line *line = container_of(ref, struct pblk_line, ref);
1750         struct pblk *pblk = line->pblk;
1751
1752         __pblk_line_put(pblk, line);
1753 }
1754
1755 void pblk_line_put_wq(struct kref *ref)
1756 {
1757         struct pblk_line *line = container_of(ref, struct pblk_line, ref);
1758         struct pblk *pblk = line->pblk;
1759         struct pblk_line_ws *line_put_ws;
1760
1761         line_put_ws = mempool_alloc(&pblk->gen_ws_pool, GFP_ATOMIC);
1762         if (!line_put_ws)
1763                 return;
1764
1765         line_put_ws->pblk = pblk;
1766         line_put_ws->line = line;
1767         line_put_ws->priv = NULL;
1768
1769         INIT_WORK(&line_put_ws->ws, pblk_line_put_ws);
1770         queue_work(pblk->r_end_wq, &line_put_ws->ws);
1771 }
1772
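/* Submit an asynchronous erase of @ppa; completion is handled by
 * pblk_end_io_erase().
 */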
1773 int pblk_blk_erase_async(struct pblk *pblk, struct ppa_addr ppa)
1774 {
1775         struct nvm_rq *rqd;
1776         int err;
1777
1778         rqd = pblk_alloc_rqd(pblk, PBLK_ERASE);
1779
1780         pblk_setup_e_rq(pblk, rqd, ppa);
1781
1782         rqd->end_io = pblk_end_io_erase;
1783         rqd->private = pblk;
1784
1785         trace_pblk_chunk_reset(pblk_disk_name(pblk),
1786                                 &ppa, PBLK_CHUNK_RESET_START);
1787
1788         /* The write thread schedules erases so that they minimize disturbance
1789          * to user writes. Thus, there is no need to take the LUN semaphore.
1790          */
1791         err = pblk_submit_io(pblk, rqd);
1792         if (err) {
1793                 struct nvm_tgt_dev *dev = pblk->dev;
1794                 struct nvm_geo *geo = &dev->geo;
1795
1796                 pblk_err(pblk, "could not async erase line:%d,blk:%d\n",
1797                                         pblk_ppa_to_line_id(ppa),
1798                                         pblk_ppa_to_pos(geo, ppa));
1799         }
1800
1801         return err;
1802 }
1803
1804 struct pblk_line *pblk_line_get_data(struct pblk *pblk)
1805 {
1806         return pblk->l_mg.data_line;
1807 }
1808
1809 /* For now, always erase next line */
1810 struct pblk_line *pblk_line_get_erase(struct pblk *pblk)
1811 {
1812         return pblk->l_mg.data_next;
1813 }
1814
1815 int pblk_line_is_full(struct pblk_line *line)
1816 {
1817         return (line->left_msecs == 0);
1818 }
1819
1820 static void pblk_line_should_sync_meta(struct pblk *pblk)
1821 {
1822         if (pblk_rl_is_limit(&pblk->rl))
1823                 pblk_line_close_meta_sync(pblk);
1824 }
1825
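/* Transition a fully written line from open to closed and queue it on
 * the GC list selected by pblk_line_gc_list().
 */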
1826 void pblk_line_close(struct pblk *pblk, struct pblk_line *line)
1827 {
1828         struct nvm_tgt_dev *dev = pblk->dev;
1829         struct nvm_geo *geo = &dev->geo;
1830         struct pblk_line_meta *lm = &pblk->lm;
1831         struct pblk_line_mgmt *l_mg = &pblk->l_mg;
1832         struct list_head *move_list;
1833         int i;
1834
1835 #ifdef CONFIG_NVM_PBLK_DEBUG
1836         WARN(!bitmap_full(line->map_bitmap, lm->sec_per_line),
1837                                 "pblk: corrupt closed line %d\n", line->id);
1838 #endif
1839
1840         spin_lock(&l_mg->free_lock);
1841         WARN_ON(!test_and_clear_bit(line->meta_line, &l_mg->meta_bitmap));
1842         spin_unlock(&l_mg->free_lock);
1843
1844         spin_lock(&l_mg->gc_lock);
1845         spin_lock(&line->lock);
1846         WARN_ON(line->state != PBLK_LINESTATE_OPEN);
1847         line->state = PBLK_LINESTATE_CLOSED;
1848         move_list = pblk_line_gc_list(pblk, line);
1849         list_add_tail(&line->list, move_list);
1850
1851         mempool_free(line->map_bitmap, l_mg->bitmap_pool);
1852         line->map_bitmap = NULL;
1853         line->smeta = NULL;
1854         line->emeta = NULL;
1855
1856         for (i = 0; i < lm->blk_per_line; i++) {
1857                 struct pblk_lun *rlun = &pblk->luns[i];
1858                 int pos = pblk_ppa_to_pos(geo, rlun->bppa);
1859                 int state = line->chks[pos].state;
1860
1861                 if (!(state & NVM_CHK_ST_OFFLINE))
1862                         line->chks[pos].state = NVM_CHK_ST_CLOSED;
1863         }
1864
1865         spin_unlock(&line->lock);
1866         spin_unlock(&l_mg->gc_lock);
1867
1868         trace_pblk_line_state(pblk_disk_name(pblk), line->id,
1869                                         line->state);
1870 }
1871
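/* Finalize the line's end metadata: valid sector counts, bad block
 * bitmap, write amplification counters and CRCs. The line is then queued
 * for emeta submission.
 */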
1872 void pblk_line_close_meta(struct pblk *pblk, struct pblk_line *line)
1873 {
1874         struct pblk_line_mgmt *l_mg = &pblk->l_mg;
1875         struct pblk_line_meta *lm = &pblk->lm;
1876         struct pblk_emeta *emeta = line->emeta;
1877         struct line_emeta *emeta_buf = emeta->buf;
1878         struct wa_counters *wa = emeta_to_wa(lm, emeta_buf);
1879
1880         /* No need for an exact vsc value; avoid the big line lock and use an approximation. */
1881         memcpy(emeta_to_vsc(pblk, emeta_buf), l_mg->vsc_list, lm->vsc_list_len);
1882         memcpy(emeta_to_bb(emeta_buf), line->blk_bitmap, lm->blk_bitmap_len);
1883
1884         wa->user = cpu_to_le64(atomic64_read(&pblk->user_wa));
1885         wa->pad = cpu_to_le64(atomic64_read(&pblk->pad_wa));
1886         wa->gc = cpu_to_le64(atomic64_read(&pblk->gc_wa));
1887
1888         if (le32_to_cpu(emeta_buf->header.identifier) != PBLK_MAGIC) {
1889                 emeta_buf->header.identifier = cpu_to_le32(PBLK_MAGIC);
1890                 guid_copy((guid_t *)&emeta_buf->header.uuid,
1891                                                         &pblk->instance_uuid);
1892                 emeta_buf->header.id = cpu_to_le32(line->id);
1893                 emeta_buf->header.type = cpu_to_le16(line->type);
1894                 emeta_buf->header.version_major = EMETA_VERSION_MAJOR;
1895                 emeta_buf->header.version_minor = EMETA_VERSION_MINOR;
1896                 emeta_buf->header.crc = cpu_to_le32(
1897                         pblk_calc_meta_header_crc(pblk, &emeta_buf->header));
1898         }
1899
1900         emeta_buf->nr_valid_lbas = cpu_to_le64(line->nr_valid_lbas);
1901         emeta_buf->crc = cpu_to_le32(pblk_calc_emeta_crc(pblk, emeta_buf));
1902
1903         spin_lock(&l_mg->close_lock);
1904         spin_lock(&line->lock);
1905
1906         /* Update the in-memory start address for emeta, in case it has
1907          * shifted due to write errors
1908          */
1909         if (line->emeta_ssec != line->cur_sec)
1910                 line->emeta_ssec = line->cur_sec;
1911
1912         list_add_tail(&line->list, &l_mg->emeta_list);
1913         spin_unlock(&line->lock);
1914         spin_unlock(&l_mg->close_lock);
1915
1916         pblk_line_should_sync_meta(pblk);
1917 }
1918
1919 static void pblk_save_lba_list(struct pblk *pblk, struct pblk_line *line)
1920 {
1921         struct pblk_line_meta *lm = &pblk->lm;
1922         struct pblk_line_mgmt *l_mg = &pblk->l_mg;
1923         unsigned int lba_list_size = lm->emeta_len[2];
1924         struct pblk_w_err_gc *w_err_gc = line->w_err_gc;
1925         struct pblk_emeta *emeta = line->emeta;
1926
1927         w_err_gc->lba_list = pblk_malloc(lba_list_size,
1928                                          l_mg->emeta_alloc_type, GFP_KERNEL);
1929         memcpy(w_err_gc->lba_list, emeta_to_lbas(pblk, emeta->buf),
1930                                 lba_list_size);
1931 }
1932
1933 void pblk_line_close_ws(struct work_struct *work)
1934 {
1935         struct pblk_line_ws *line_ws = container_of(work, struct pblk_line_ws,
1936                                                                         ws);
1937         struct pblk *pblk = line_ws->pblk;
1938         struct pblk_line *line = line_ws->line;
1939         struct pblk_w_err_gc *w_err_gc = line->w_err_gc;
1940
1941         /* Write errors make the emeta start address stored in smeta invalid,
1942          * so keep a copy of the lba list until the line has been GC'd.
1943          */
1944         if (w_err_gc->has_write_err)
1945                 pblk_save_lba_list(pblk, line);
1946
1947         pblk_line_close(pblk, line);
1948         mempool_free(line_ws, &pblk->gen_ws_pool);
1949 }
1950
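/* Generic helper: allocate a work item from the shared gen_ws_pool and
 * queue @work on @wq with the given line and private data.
 */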
1951 void pblk_gen_run_ws(struct pblk *pblk, struct pblk_line *line, void *priv,
1952                       void (*work)(struct work_struct *), gfp_t gfp_mask,
1953                       struct workqueue_struct *wq)
1954 {
1955         struct pblk_line_ws *line_ws;
1956
1957         line_ws = mempool_alloc(&pblk->gen_ws_pool, gfp_mask);
             if (!line_ws)
                     return;
1958
1959         line_ws->pblk = pblk;
1960         line_ws->line = line;
1961         line_ws->priv = priv;
1962
1963         INIT_WORK(&line_ws->ws, work);
1964         queue_work(wq, &line_ws->ws);
1965 }
1966
1967 static void __pblk_down_chunk(struct pblk *pblk, int pos)
1968 {
1969         struct pblk_lun *rlun = &pblk->luns[pos];
1970         int ret;
1971
1972         /*
1973          * Only send one inflight I/O per LUN. Since we map at page
1974          * granularity, all ppas in the I/O will map to the same LUN.
1975          */
1976
1977         ret = down_timeout(&rlun->wr_sem, msecs_to_jiffies(30000));
1978         if (ret == -ETIME || ret == -EINTR)
1979                 pblk_err(pblk, "taking lun semaphore timed out: err %d\n",
1980                                 -ret);
1981 }
1982
1983 void pblk_down_chunk(struct pblk *pblk, struct ppa_addr ppa)
1984 {
1985         struct nvm_tgt_dev *dev = pblk->dev;
1986         struct nvm_geo *geo = &dev->geo;
1987         int pos = pblk_ppa_to_pos(geo, ppa);
1988
1989         __pblk_down_chunk(pblk, pos);
1990 }
1991
1992 void pblk_down_rq(struct pblk *pblk, struct ppa_addr ppa,
1993                   unsigned long *lun_bitmap)
1994 {
1995         struct nvm_tgt_dev *dev = pblk->dev;
1996         struct nvm_geo *geo = &dev->geo;
1997         int pos = pblk_ppa_to_pos(geo, ppa);
1998
1999         /* If the LUN has already been locked for this request, do not
2000          * attempt to lock it again.
2001          */
2002         if (test_and_set_bit(pos, lun_bitmap))
2003                 return;
2004
2005         __pblk_down_chunk(pblk, pos);
2006 }
2007
2008 void pblk_up_chunk(struct pblk *pblk, struct ppa_addr ppa)
2009 {
2010         struct nvm_tgt_dev *dev = pblk->dev;
2011         struct nvm_geo *geo = &dev->geo;
2012         struct pblk_lun *rlun;
2013         int pos = pblk_ppa_to_pos(geo, ppa);
2014
2015         rlun = &pblk->luns[pos];
2016         up(&rlun->wr_sem);
2017 }
2018
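/* Release the write semaphore of every LUN taken for this request */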
2019 void pblk_up_rq(struct pblk *pblk, unsigned long *lun_bitmap)
2020 {
2021         struct nvm_tgt_dev *dev = pblk->dev;
2022         struct nvm_geo *geo = &dev->geo;
2023         struct pblk_lun *rlun;
2024         int num_lun = geo->all_luns;
2025         int bit = -1;
2026
2027         while ((bit = find_next_bit(lun_bitmap, num_lun, bit + 1)) < num_lun) {
2028                 rlun = &pblk->luns[bit];
2029                 up(&rlun->wr_sem);
2030         }
2031 }
2032
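/* Set the L2P entry for @lba to @ppa, invalidating a previous device
 * mapping if one exists.
 */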
2033 void pblk_update_map(struct pblk *pblk, sector_t lba, struct ppa_addr ppa)
2034 {
2035         struct ppa_addr ppa_l2p;
2036
2037         /* logic error: lba out-of-bounds. Ignore update */
2038         if (!(lba < pblk->capacity)) {
2039                 WARN(1, "pblk: corrupted L2P map request\n");
2040                 return;
2041         }
2042
2043         spin_lock(&pblk->trans_lock);
2044         ppa_l2p = pblk_trans_map_get(pblk, lba);
2045
2046         if (!pblk_addr_in_cache(ppa_l2p) && !pblk_ppa_empty(ppa_l2p))
2047                 pblk_map_invalidate(pblk, ppa_l2p);
2048
2049         pblk_trans_map_set(pblk, lba, ppa);
2050         spin_unlock(&pblk->trans_lock);
2051 }
2052
2053 void pblk_update_map_cache(struct pblk *pblk, sector_t lba, struct ppa_addr ppa)
2054 {
2056 #ifdef CONFIG_NVM_PBLK_DEBUG
2057         /* Callers must ensure that the ppa points to a cache address */
2058         BUG_ON(!pblk_addr_in_cache(ppa));
2059         BUG_ON(pblk_rb_pos_oob(&pblk->rwb, pblk_addr_to_cacheline(ppa)));
2060 #endif
2061
2062         pblk_update_map(pblk, lba, ppa);
2063 }
2064
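/* L2P update for sectors being moved by GC. Returns 1 if the entry still
 * pointed to the GC'd line and was moved to the new cache address, or 0
 * if the sector was overwritten (and invalidated) in the meantime.
 */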
2065 int pblk_update_map_gc(struct pblk *pblk, sector_t lba, struct ppa_addr ppa_new,
2066                        struct pblk_line *gc_line, u64 paddr_gc)
2067 {
2068         struct ppa_addr ppa_l2p, ppa_gc;
2069         int ret = 1;
2070
2071 #ifdef CONFIG_NVM_PBLK_DEBUG
2072         /* Callers must ensure that the ppa points to a cache address */
2073         BUG_ON(!pblk_addr_in_cache(ppa_new));
2074         BUG_ON(pblk_rb_pos_oob(&pblk->rwb, pblk_addr_to_cacheline(ppa_new)));
2075 #endif
2076
2077         /* logic error: lba out-of-bounds. Ignore update */
2078         if (!(lba < pblk->capacity)) {
2079                 WARN(1, "pblk: corrupted L2P map request\n");
2080                 return 0;
2081         }
2082
2083         spin_lock(&pblk->trans_lock);
2084         ppa_l2p = pblk_trans_map_get(pblk, lba);
2085         ppa_gc = addr_to_gen_ppa(pblk, paddr_gc, gc_line->id);
2086
2087         if (!pblk_ppa_comp(ppa_l2p, ppa_gc)) {
2088                 spin_lock(&gc_line->lock);
2089                 WARN(!test_bit(paddr_gc, gc_line->invalid_bitmap),
2090                                                 "pblk: corrupted GC update");
2091                 spin_unlock(&gc_line->lock);
2092
2093                 ret = 0;
2094                 goto out;
2095         }
2096
2097         pblk_trans_map_set(pblk, lba, ppa_new);
2098 out:
2099         spin_unlock(&pblk->trans_lock);
2100         return ret;
2101 }
2102
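/* L2P update on write completion: map @lba to its device address unless
 * the backing cacheline has been overwritten in the meantime.
 */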
2103 void pblk_update_map_dev(struct pblk *pblk, sector_t lba,
2104                          struct ppa_addr ppa_mapped, struct ppa_addr ppa_cache)
2105 {
2106         struct ppa_addr ppa_l2p;
2107
2108 #ifdef CONFIG_NVM_PBLK_DEBUG
2109         /* Callers must ensure that the ppa points to a device address */
2110         BUG_ON(pblk_addr_in_cache(ppa_mapped));
2111 #endif
2112         /* Invalidate and discard padded entries */
2113         if (lba == ADDR_EMPTY) {
2114                 atomic64_inc(&pblk->pad_wa);
2115 #ifdef CONFIG_NVM_PBLK_DEBUG
2116                 atomic_long_inc(&pblk->padded_wb);
2117 #endif
2118                 if (!pblk_ppa_empty(ppa_mapped))
2119                         pblk_map_invalidate(pblk, ppa_mapped);
2120                 return;
2121         }
2122
2123         /* logic error: lba out-of-bounds. Ignore update */
2124         if (!(lba < pblk->capacity)) {
2125                 WARN(1, "pblk: corrupted L2P map request\n");
2126                 return;
2127         }
2128
2129         spin_lock(&pblk->trans_lock);
2130         ppa_l2p = pblk_trans_map_get(pblk, lba);
2131
2132         /* Do not update the L2P if the cacheline has been updated since the
2133          * write was mapped; in that case the mapped ppa must be invalidated.
2134          */
2135         if (!pblk_ppa_comp(ppa_l2p, ppa_cache)) {
2136                 if (!pblk_ppa_empty(ppa_mapped))
2137                         pblk_map_invalidate(pblk, ppa_mapped);
2138                 goto out;
2139         }
2140
2141 #ifdef CONFIG_NVM_PBLK_DEBUG
2142         WARN_ON(!pblk_addr_in_cache(ppa_l2p) && !pblk_ppa_empty(ppa_l2p));
2143 #endif
2144
2145         pblk_trans_map_set(pblk, lba, ppa_mapped);
2146 out:
2147         spin_unlock(&pblk->trans_lock);
2148 }
2149
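/* Look up a sequential lba range. A reference is taken on each line that
 * backs a device-mapped sector so it is not recycled while the read is in
 * flight.
 */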
2150 void pblk_lookup_l2p_seq(struct pblk *pblk, struct ppa_addr *ppas,
2151                          sector_t blba, int nr_secs)
2152 {
2153         int i;
2154
2155         spin_lock(&pblk->trans_lock);
2156         for (i = 0; i < nr_secs; i++) {
2157                 struct ppa_addr ppa;
2158
2159                 ppa = ppas[i] = pblk_trans_map_get(pblk, blba + i);
2160
2161                 /* If the L2P entry maps to a line, take a reference on it */
2162                 if (!pblk_ppa_empty(ppa) && !pblk_addr_in_cache(ppa)) {
2163                         struct pblk_line *line = pblk_ppa_to_line(pblk, ppa);
2164
2165                         kref_get(&line->ref);
2166                 }
2167         }
2168         spin_unlock(&pblk->trans_lock);
2169 }
2170
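/* Look up an arbitrary list of lbas; ADDR_EMPTY entries are skipped */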
2171 void pblk_lookup_l2p_rand(struct pblk *pblk, struct ppa_addr *ppas,
2172                           u64 *lba_list, int nr_secs)
2173 {
2174         u64 lba;
2175         int i;
2176
2177         spin_lock(&pblk->trans_lock);
2178         for (i = 0; i < nr_secs; i++) {
2179                 lba = lba_list[i];
2180                 if (lba != ADDR_EMPTY) {
2181                         /* logic error: lba out-of-bounds. Skip lookup */
2182                         if (!(lba < pblk->capacity)) {
2183                                 WARN(1, "pblk: corrupted L2P map request\n");
2184                                 continue;
2185                         }
2186                         ppas[i] = pblk_trans_map_get(pblk, lba);
2187                 }
2188         }
2189         spin_unlock(&pblk->trans_lock);
2190 }
2191
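/* Return the buffer holding per-sector metadata for a write: the OOB
 * metadata buffer if the device supports it, otherwise the packed
 * metadata page at the end of the request's bio.
 */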
2192 void *pblk_get_meta_for_writes(struct pblk *pblk, struct nvm_rq *rqd)
2193 {
2194         void *buffer;
2195
2196         if (pblk_is_oob_meta_supported(pblk)) {
2197                 /* Just use OOB metadata buffer as always */
2198                 buffer = rqd->meta_list;
2199         } else {
2200                 /* Reuse the last page of the request (packed metadata) in a
2201                  * similar way to traditional OOB metadata.
2202                  */
2203                 buffer = page_to_virt(
2204                         rqd->bio->bi_io_vec[rqd->bio->bi_vcnt - 1].bv_page);
2205         }
2206
2207         return buffer;
2208 }
2209
2210 void pblk_get_packed_meta(struct pblk *pblk, struct nvm_rq *rqd)
2211 {
2212         void *meta_list = rqd->meta_list;
2213         void *page;
2214         int i;
2215
2216         if (pblk_is_oob_meta_supported(pblk))
2217                 return;
2218
2219         page = page_to_virt(rqd->bio->bi_io_vec[rqd->bio->bi_vcnt - 1].bv_page);
2220         /* Fill the OOB metadata buffer with data from the packed metadata */
2221         for (i = 0; i < rqd->nr_ppas; i++)
2222                 memcpy(pblk_get_meta(pblk, meta_list, i),
2223                         page + (i * sizeof(struct pblk_sec_meta)),
2224                         sizeof(struct pblk_sec_meta));
2225 }