lightnvm: pblk: verify that cache read is still valid
drivers/lightnvm/pblk-core.c (linux-2.6-block.git)
/*
 * Copyright (C) 2016 CNEX Labs
 * Initial release: Javier Gonzalez <javier@cnexlabs.com>
 *                  Matias Bjorling <matias@cnexlabs.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * pblk-core.c - pblk's core functionality
 *
 */

#include "pblk.h"

static void pblk_mark_bb(struct pblk *pblk, struct pblk_line *line,
			 struct ppa_addr *ppa)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	int pos = pblk_dev_ppa_to_pos(geo, *ppa);

	pr_debug("pblk: erase failed: line:%d, pos:%d\n", line->id, pos);
	atomic_long_inc(&pblk->erase_failed);

	atomic_dec(&line->blk_in_line);
	if (test_and_set_bit(pos, line->blk_bitmap))
		pr_err("pblk: attempted to erase bb: line:%d, pos:%d\n",
							line->id, pos);

	pblk_line_run_ws(pblk, NULL, ppa, pblk_line_mark_bb, pblk->bb_wq);
}

static void __pblk_end_io_erase(struct pblk *pblk, struct nvm_rq *rqd)
{
	struct pblk_line *line;

	line = &pblk->lines[pblk_dev_ppa_to_line(rqd->ppa_addr)];
	atomic_dec(&line->left_seblks);

	if (rqd->error) {
		struct ppa_addr *ppa;

		ppa = kmalloc(sizeof(struct ppa_addr), GFP_ATOMIC);
		if (!ppa)
			return;

		*ppa = rqd->ppa_addr;
		pblk_mark_bb(pblk, line, ppa);
	}

	atomic_dec(&pblk->inflight_io);
}

/* Erase completion assumes that only one block is erased at a time */
static void pblk_end_io_erase(struct nvm_rq *rqd)
{
	struct pblk *pblk = rqd->private;

	__pblk_end_io_erase(pblk, rqd);
	mempool_free(rqd, pblk->g_rq_pool);
}

void __pblk_map_invalidate(struct pblk *pblk, struct pblk_line *line,
			   u64 paddr)
{
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	struct list_head *move_list = NULL;

	/* Lines being reclaimed (GC'ed) cannot be invalidated. Before the L2P
	 * table is modified with reclaimed sectors, a check is done to ensure
	 * that newer updates are not overwritten.
	 */
	spin_lock(&line->lock);
	if (line->state == PBLK_LINESTATE_GC ||
					line->state == PBLK_LINESTATE_FREE) {
		spin_unlock(&line->lock);
		return;
	}

	if (test_and_set_bit(paddr, line->invalid_bitmap)) {
		WARN_ONCE(1, "pblk: double invalidate\n");
		spin_unlock(&line->lock);
		return;
	}
	le32_add_cpu(line->vsc, -1);

	if (line->state == PBLK_LINESTATE_CLOSED)
		move_list = pblk_line_gc_list(pblk, line);
	spin_unlock(&line->lock);

	if (move_list) {
		spin_lock(&l_mg->gc_lock);
		spin_lock(&line->lock);
		/* Prevent moving a line that has just been chosen for GC */
		if (line->state == PBLK_LINESTATE_GC ||
					line->state == PBLK_LINESTATE_FREE) {
			spin_unlock(&line->lock);
			spin_unlock(&l_mg->gc_lock);
			return;
		}
		spin_unlock(&line->lock);

		list_move_tail(&line->list, move_list);
		spin_unlock(&l_mg->gc_lock);
	}
}

void pblk_map_invalidate(struct pblk *pblk, struct ppa_addr ppa)
{
	struct pblk_line *line;
	u64 paddr;
	int line_id;

#ifdef CONFIG_NVM_DEBUG
	/* Callers must ensure that the ppa points to a device address */
	BUG_ON(pblk_addr_in_cache(ppa));
	BUG_ON(pblk_ppa_empty(ppa));
#endif

	line_id = pblk_tgt_ppa_to_line(ppa);
	line = &pblk->lines[line_id];
	paddr = pblk_dev_ppa_to_line_addr(pblk, ppa);

	__pblk_map_invalidate(pblk, line, paddr);
}

static void pblk_invalidate_range(struct pblk *pblk, sector_t slba,
				  unsigned int nr_secs)
{
	sector_t lba;

	spin_lock(&pblk->trans_lock);
	for (lba = slba; lba < slba + nr_secs; lba++) {
		struct ppa_addr ppa;

		ppa = pblk_trans_map_get(pblk, lba);

		if (!pblk_addr_in_cache(ppa) && !pblk_ppa_empty(ppa))
			pblk_map_invalidate(pblk, ppa);

		pblk_ppa_set_empty(&ppa);
		pblk_trans_map_set(pblk, lba, ppa);
	}
	spin_unlock(&pblk->trans_lock);
}

struct nvm_rq *pblk_alloc_rqd(struct pblk *pblk, int rw)
{
	mempool_t *pool;
	struct nvm_rq *rqd;
	int rq_size;

	if (rw == WRITE) {
		pool = pblk->w_rq_pool;
		rq_size = pblk_w_rq_size;
	} else {
		pool = pblk->g_rq_pool;
		rq_size = pblk_g_rq_size;
	}

	rqd = mempool_alloc(pool, GFP_KERNEL);
	memset(rqd, 0, rq_size);

	return rqd;
}

void pblk_free_rqd(struct pblk *pblk, struct nvm_rq *rqd, int rw)
{
	mempool_t *pool;

	if (rw == WRITE)
		pool = pblk->w_rq_pool;
	else
		pool = pblk->g_rq_pool;

	mempool_free(rqd, pool);
}

void pblk_bio_free_pages(struct pblk *pblk, struct bio *bio, int off,
			 int nr_pages)
{
	struct bio_vec bv;
	int i;

	WARN_ON(off + nr_pages != bio->bi_vcnt);

	bio_advance(bio, off * PBLK_EXPOSED_PAGE_SIZE);
	for (i = off; i < nr_pages + off; i++) {
		bv = bio->bi_io_vec[i];
		mempool_free(bv.bv_page, pblk->page_pool);
	}
}

int pblk_bio_add_pages(struct pblk *pblk, struct bio *bio, gfp_t flags,
		       int nr_pages)
{
	struct request_queue *q = pblk->dev->q;
	struct page *page;
	int i, ret;

	for (i = 0; i < nr_pages; i++) {
		page = mempool_alloc(pblk->page_pool, flags);
		if (!page)
			goto err;

		ret = bio_add_pc_page(q, bio, page, PBLK_EXPOSED_PAGE_SIZE, 0);
		if (ret != PBLK_EXPOSED_PAGE_SIZE) {
			pr_err("pblk: could not add page to bio\n");
			mempool_free(page, pblk->page_pool);
			goto err;
		}
	}

	return 0;
err:
	pblk_bio_free_pages(pblk, bio, 0, i);
	return -1;
}

static void pblk_write_kick(struct pblk *pblk)
{
	wake_up_process(pblk->writer_ts);
	mod_timer(&pblk->wtimer, jiffies + msecs_to_jiffies(1000));
}

void pblk_write_timer_fn(unsigned long data)
{
	struct pblk *pblk = (struct pblk *)data;

	/* kick the write thread every tick to flush outstanding data */
	pblk_write_kick(pblk);
}

void pblk_write_should_kick(struct pblk *pblk)
{
	unsigned int secs_avail = pblk_rb_read_count(&pblk->rwb);

	if (secs_avail >= pblk->min_write_pgs)
		pblk_write_kick(pblk);
}

void pblk_end_bio_sync(struct bio *bio)
{
	struct completion *waiting = bio->bi_private;

	complete(waiting);
}

void pblk_end_io_sync(struct nvm_rq *rqd)
{
	struct completion *waiting = rqd->private;

	complete(waiting);
}

void pblk_wait_for_meta(struct pblk *pblk)
{
	do {
		if (!atomic_read(&pblk->inflight_io))
			break;

		schedule();
	} while (1);
}

static void pblk_flush_writer(struct pblk *pblk)
{
	pblk_rb_flush(&pblk->rwb);
	do {
		if (!pblk_rb_sync_count(&pblk->rwb))
			break;

		pblk_write_kick(pblk);
		schedule();
	} while (1);
}

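/* Pick the GC list a line belongs on from its valid sector count (vsc):
 * fully invalid lines go to gc_full, lines under the high/mid thresholds
 * to gc_high/gc_mid, partially valid lines to gc_low, and lines whose
 * vsc still equals sec_in_line to gc_empty. Any other value marks the
 * line corrupt. Returns the list to move the line to, or NULL if its
 * group is unchanged.
 */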
struct list_head *pblk_line_gc_list(struct pblk *pblk, struct pblk_line *line)
{
	struct pblk_line_meta *lm = &pblk->lm;
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	struct list_head *move_list = NULL;
	int vsc = le32_to_cpu(*line->vsc);

	lockdep_assert_held(&line->lock);

	if (!vsc) {
		if (line->gc_group != PBLK_LINEGC_FULL) {
			line->gc_group = PBLK_LINEGC_FULL;
			move_list = &l_mg->gc_full_list;
		}
	} else if (vsc < lm->high_thrs) {
		if (line->gc_group != PBLK_LINEGC_HIGH) {
			line->gc_group = PBLK_LINEGC_HIGH;
			move_list = &l_mg->gc_high_list;
		}
	} else if (vsc < lm->mid_thrs) {
		if (line->gc_group != PBLK_LINEGC_MID) {
			line->gc_group = PBLK_LINEGC_MID;
			move_list = &l_mg->gc_mid_list;
		}
	} else if (vsc < line->sec_in_line) {
		if (line->gc_group != PBLK_LINEGC_LOW) {
			line->gc_group = PBLK_LINEGC_LOW;
			move_list = &l_mg->gc_low_list;
		}
	} else if (vsc == line->sec_in_line) {
		if (line->gc_group != PBLK_LINEGC_EMPTY) {
			line->gc_group = PBLK_LINEGC_EMPTY;
			move_list = &l_mg->gc_empty_list;
		}
	} else {
		line->state = PBLK_LINESTATE_CORRUPT;
		line->gc_group = PBLK_LINEGC_NONE;
		move_list = &l_mg->corrupt_list;
		pr_err("pblk: corrupted vsc for line %d, vsc:%d (%d/%d/%d)\n",
						line->id, vsc,
						line->sec_in_line,
						lm->high_thrs, lm->mid_thrs);
	}

	return move_list;
}

void pblk_discard(struct pblk *pblk, struct bio *bio)
{
	sector_t slba = pblk_get_lba(bio);
	sector_t nr_secs = pblk_get_secs(bio);

	pblk_invalidate_range(pblk, slba, nr_secs);
}

struct ppa_addr pblk_get_lba_map(struct pblk *pblk, sector_t lba)
{
	struct ppa_addr ppa;

	spin_lock(&pblk->trans_lock);
	ppa = pblk_trans_map_get(pblk, lba);
	spin_unlock(&pblk->trans_lock);

	return ppa;
}

void pblk_log_write_err(struct pblk *pblk, struct nvm_rq *rqd)
{
	atomic_long_inc(&pblk->write_failed);
#ifdef CONFIG_NVM_DEBUG
	pblk_print_failed_rqd(pblk, rqd, rqd->error);
#endif
}

void pblk_log_read_err(struct pblk *pblk, struct nvm_rq *rqd)
{
	/* Empty page read is not necessarily an error (e.g., L2P recovery) */
	if (rqd->error == NVM_RSP_ERR_EMPTYPAGE) {
		atomic_long_inc(&pblk->read_empty);
		return;
	}

	switch (rqd->error) {
	case NVM_RSP_WARN_HIGHECC:
		atomic_long_inc(&pblk->read_high_ecc);
		break;
	case NVM_RSP_ERR_FAILECC:
	case NVM_RSP_ERR_FAILCRC:
		atomic_long_inc(&pblk->read_failed);
		break;
	default:
		pr_err("pblk: unknown read error:%d\n", rqd->error);
	}
#ifdef CONFIG_NVM_DEBUG
	pblk_print_failed_rqd(pblk, rqd, rqd->error);
#endif
}

void pblk_set_sec_per_write(struct pblk *pblk, int sec_per_write)
{
	pblk->sec_per_write = sec_per_write;
}

int pblk_submit_io(struct pblk *pblk, struct nvm_rq *rqd)
{
	struct nvm_tgt_dev *dev = pblk->dev;

#ifdef CONFIG_NVM_DEBUG
	struct ppa_addr *ppa_list;

	ppa_list = (rqd->nr_ppas > 1) ? rqd->ppa_list : &rqd->ppa_addr;
	if (pblk_boundary_ppa_checks(dev, ppa_list, rqd->nr_ppas)) {
		WARN_ON(1);
		return -EINVAL;
	}

	if (rqd->opcode == NVM_OP_PWRITE) {
		struct pblk_line *line;
		struct ppa_addr ppa;
		int i;

		for (i = 0; i < rqd->nr_ppas; i++) {
			ppa = ppa_list[i];
			line = &pblk->lines[pblk_dev_ppa_to_line(ppa)];

			spin_lock(&line->lock);
			if (line->state != PBLK_LINESTATE_OPEN) {
				pr_err("pblk: bad ppa: line:%d,state:%d\n",
							line->id, line->state);
				WARN_ON(1);
				spin_unlock(&line->lock);
				return -EINVAL;
			}
			spin_unlock(&line->lock);
		}
	}
#endif

	atomic_inc(&pblk->inflight_io);

	return nvm_submit_io(dev, rqd);
}

struct bio *pblk_bio_map_addr(struct pblk *pblk, void *data,
			      unsigned int nr_secs, unsigned int len,
			      int alloc_type, gfp_t gfp_mask)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	void *kaddr = data;
	struct page *page;
	struct bio *bio;
	int i, ret;

	if (alloc_type == PBLK_KMALLOC_META)
		return bio_map_kern(dev->q, kaddr, len, gfp_mask);

	bio = bio_kmalloc(gfp_mask, nr_secs);
	if (!bio)
		return ERR_PTR(-ENOMEM);

	for (i = 0; i < nr_secs; i++) {
		page = vmalloc_to_page(kaddr);
		if (!page) {
			pr_err("pblk: could not map vmalloc bio\n");
			bio_put(bio);
			bio = ERR_PTR(-ENOMEM);
			goto out;
		}

		ret = bio_add_pc_page(dev->q, bio, page, PAGE_SIZE, 0);
		if (ret != PAGE_SIZE) {
			pr_err("pblk: could not add page to bio\n");
			bio_put(bio);
			bio = ERR_PTR(-ENOMEM);
			goto out;
		}

		kaddr += PAGE_SIZE;
	}
out:
	return bio;
}

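/* Round the number of sectors to submit down to a write-unit multiple:
 * anything between min_write_pgs and sec_per_write goes out in multiples
 * of the minimum write size, and a flush forces out a (padded) minimum
 * write. Illustrative values: with min = 4 and max = 8, 10 available
 * sectors yield 8, 6 yield 4, and 3 yield 4 on a flush but 0 otherwise.
 */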
int pblk_calc_secs(struct pblk *pblk, unsigned long secs_avail,
		   unsigned long secs_to_flush)
{
	int max = pblk->sec_per_write;
	int min = pblk->min_write_pgs;
	int secs_to_sync = 0;

	if (secs_avail >= max)
		secs_to_sync = max;
	else if (secs_avail >= min)
		secs_to_sync = min * (secs_avail / min);
	else if (secs_to_flush)
		secs_to_sync = min;

	return secs_to_sync;
}

void pblk_dealloc_page(struct pblk *pblk, struct pblk_line *line, int nr_secs)
{
	u64 addr;
	int i;

	addr = find_next_zero_bit(line->map_bitmap,
					pblk->lm.sec_per_line, line->cur_sec);
	line->cur_sec = addr - nr_secs;

	for (i = 0; i < nr_secs; i++, line->cur_sec--)
		WARN_ON(!test_and_clear_bit(line->cur_sec, line->map_bitmap));
}

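/* Claim the next nr_secs free sectors in the line's map bitmap, starting
 * the search at cur_sec, and advance cur_sec past them. Returns the
 * line-local address (paddr) of the first claimed sector. Callers must
 * hold line->lock; pblk_alloc_page() below is the locked wrapper that
 * also accounts for left_msecs.
 */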
u64 __pblk_alloc_page(struct pblk *pblk, struct pblk_line *line, int nr_secs)
{
	u64 addr;
	int i;

	lockdep_assert_held(&line->lock);

	/* logic error: ppa out-of-bounds. Prevent generating bad address */
	if (line->cur_sec + nr_secs > pblk->lm.sec_per_line) {
		WARN(1, "pblk: page allocation out of bounds\n");
		nr_secs = pblk->lm.sec_per_line - line->cur_sec;
	}

	line->cur_sec = addr = find_next_zero_bit(line->map_bitmap,
					pblk->lm.sec_per_line, line->cur_sec);
	for (i = 0; i < nr_secs; i++, line->cur_sec++)
		WARN_ON(test_and_set_bit(line->cur_sec, line->map_bitmap));

	return addr;
}

u64 pblk_alloc_page(struct pblk *pblk, struct pblk_line *line, int nr_secs)
{
	u64 addr;

	/* Lock needed in case a write fails and a recovery needs to remap
	 * failed write buffer entries
	 */
	spin_lock(&line->lock);
	addr = __pblk_alloc_page(pblk, line, nr_secs);
	line->left_msecs -= nr_secs;
	WARN(line->left_msecs < 0, "pblk: page allocation out of bounds\n");
	spin_unlock(&line->lock);

	return addr;
}

u64 pblk_lookup_page(struct pblk *pblk, struct pblk_line *line)
{
	u64 paddr;

	spin_lock(&line->lock);
	paddr = find_next_zero_bit(line->map_bitmap,
					pblk->lm.sec_per_line, line->cur_sec);
	spin_unlock(&line->lock);

	return paddr;
}

/*
 * Submit emeta to one LUN in the raid line at a time to avoid a deadlock
 * when taking the per-LUN semaphore.
 */
static int pblk_line_submit_emeta_io(struct pblk *pblk, struct pblk_line *line,
				     void *emeta_buf, u64 paddr, int dir)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	struct pblk_line_meta *lm = &pblk->lm;
	void *ppa_list, *meta_list;
	struct bio *bio;
	struct nvm_rq rqd;
	dma_addr_t dma_ppa_list, dma_meta_list;
	int min = pblk->min_write_pgs;
	int left_ppas = lm->emeta_sec[0];
	int id = line->id;
	int rq_ppas, rq_len;
	int cmd_op, bio_op;
	int i, j;
	int ret;
	DECLARE_COMPLETION_ONSTACK(wait);

	if (dir == WRITE) {
		bio_op = REQ_OP_WRITE;
		cmd_op = NVM_OP_PWRITE;
	} else if (dir == READ) {
		bio_op = REQ_OP_READ;
		cmd_op = NVM_OP_PREAD;
	} else
		return -EINVAL;

	meta_list = nvm_dev_dma_alloc(dev->parent, GFP_KERNEL,
							&dma_meta_list);
	if (!meta_list)
		return -ENOMEM;

	ppa_list = meta_list + pblk_dma_meta_size;
	dma_ppa_list = dma_meta_list + pblk_dma_meta_size;

next_rq:
	memset(&rqd, 0, sizeof(struct nvm_rq));

	rq_ppas = pblk_calc_secs(pblk, left_ppas, 0);
	rq_len = rq_ppas * geo->sec_size;

	bio = pblk_bio_map_addr(pblk, emeta_buf, rq_ppas, rq_len,
					l_mg->emeta_alloc_type, GFP_KERNEL);
	if (IS_ERR(bio)) {
		ret = PTR_ERR(bio);
		goto free_rqd_dma;
	}

	bio->bi_iter.bi_sector = 0; /* internal bio */
	bio_set_op_attrs(bio, bio_op, 0);

	rqd.bio = bio;
	rqd.meta_list = meta_list;
	rqd.ppa_list = ppa_list;
	rqd.dma_meta_list = dma_meta_list;
	rqd.dma_ppa_list = dma_ppa_list;
	rqd.opcode = cmd_op;
	rqd.nr_ppas = rq_ppas;
	rqd.end_io = pblk_end_io_sync;
	rqd.private = &wait;

	if (dir == WRITE) {
		struct pblk_sec_meta *meta_list = rqd.meta_list;

		rqd.flags = pblk_set_progr_mode(pblk, WRITE);
		for (i = 0; i < rqd.nr_ppas; ) {
			spin_lock(&line->lock);
			paddr = __pblk_alloc_page(pblk, line, min);
			spin_unlock(&line->lock);
			for (j = 0; j < min; j++, i++, paddr++) {
				meta_list[i].lba = cpu_to_le64(ADDR_EMPTY);
				rqd.ppa_list[i] =
					addr_to_gen_ppa(pblk, paddr, id);
			}
		}
	} else {
		for (i = 0; i < rqd.nr_ppas; ) {
			struct ppa_addr ppa = addr_to_gen_ppa(pblk, paddr, id);
			int pos = pblk_dev_ppa_to_pos(geo, ppa);
			int read_type = PBLK_READ_RANDOM;

			if (pblk_io_aligned(pblk, rq_ppas))
				read_type = PBLK_READ_SEQUENTIAL;
			rqd.flags = pblk_set_read_mode(pblk, read_type);

			while (test_bit(pos, line->blk_bitmap)) {
				paddr += min;
				if (pblk_boundary_paddr_checks(pblk, paddr)) {
					pr_err("pblk: corrupt emeta line:%d\n",
								line->id);
					bio_put(bio);
					ret = -EINTR;
					goto free_rqd_dma;
				}

				ppa = addr_to_gen_ppa(pblk, paddr, id);
				pos = pblk_dev_ppa_to_pos(geo, ppa);
			}

			if (pblk_boundary_paddr_checks(pblk, paddr + min)) {
				pr_err("pblk: corrupt emeta line:%d\n",
								line->id);
				bio_put(bio);
				ret = -EINTR;
				goto free_rqd_dma;
			}

			for (j = 0; j < min; j++, i++, paddr++)
				rqd.ppa_list[i] =
					addr_to_gen_ppa(pblk, paddr, line->id);
		}
	}

	ret = pblk_submit_io(pblk, &rqd);
	if (ret) {
		pr_err("pblk: emeta I/O submission failed: %d\n", ret);
		bio_put(bio);
		goto free_rqd_dma;
	}

	if (!wait_for_completion_io_timeout(&wait,
				msecs_to_jiffies(PBLK_COMMAND_TIMEOUT_MS))) {
		pr_err("pblk: emeta I/O timed out\n");
	}
	atomic_dec(&pblk->inflight_io);
	reinit_completion(&wait);

	if (likely(pblk->l_mg.emeta_alloc_type == PBLK_VMALLOC_META))
		bio_put(bio);

	if (rqd.error) {
		if (dir == WRITE)
			pblk_log_write_err(pblk, &rqd);
		else
			pblk_log_read_err(pblk, &rqd);
	}

	emeta_buf += rq_len;
	left_ppas -= rq_ppas;
	if (left_ppas)
		goto next_rq;
free_rqd_dma:
	nvm_dev_dma_free(dev->parent, rqd.meta_list, rqd.dma_meta_list);
	return ret;
}

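/* smeta lives at the start of the first good block in the line; a line
 * with no good blocks left has no smeta placement, signalled by -1.
 */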
u64 pblk_line_smeta_start(struct pblk *pblk, struct pblk_line *line)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	struct pblk_line_meta *lm = &pblk->lm;
	int bit;

	/* This usually only happens on bad lines */
	bit = find_first_zero_bit(line->blk_bitmap, lm->blk_per_line);
	if (bit >= lm->blk_per_line)
		return -1;

	return bit * geo->sec_per_pl;
}

static int pblk_line_submit_smeta_io(struct pblk *pblk, struct pblk_line *line,
				     u64 paddr, int dir)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct pblk_line_meta *lm = &pblk->lm;
	struct bio *bio;
	struct nvm_rq rqd;
	__le64 *lba_list = NULL;
	int i, ret;
	int cmd_op, bio_op;
	int flags;
	DECLARE_COMPLETION_ONSTACK(wait);

	if (dir == WRITE) {
		bio_op = REQ_OP_WRITE;
		cmd_op = NVM_OP_PWRITE;
		flags = pblk_set_progr_mode(pblk, WRITE);
		lba_list = emeta_to_lbas(pblk, line->emeta->buf);
	} else if (dir == READ) {
		bio_op = REQ_OP_READ;
		cmd_op = NVM_OP_PREAD;
		flags = pblk_set_read_mode(pblk, PBLK_READ_SEQUENTIAL);
	} else
		return -EINVAL;

	memset(&rqd, 0, sizeof(struct nvm_rq));

	rqd.meta_list = nvm_dev_dma_alloc(dev->parent, GFP_KERNEL,
							&rqd.dma_meta_list);
	if (!rqd.meta_list)
		return -ENOMEM;

	rqd.ppa_list = rqd.meta_list + pblk_dma_meta_size;
	rqd.dma_ppa_list = rqd.dma_meta_list + pblk_dma_meta_size;

	bio = bio_map_kern(dev->q, line->smeta, lm->smeta_len, GFP_KERNEL);
	if (IS_ERR(bio)) {
		ret = PTR_ERR(bio);
		goto free_ppa_list;
	}

	bio->bi_iter.bi_sector = 0; /* internal bio */
	bio_set_op_attrs(bio, bio_op, 0);

	rqd.bio = bio;
	rqd.opcode = cmd_op;
	rqd.flags = flags;
	rqd.nr_ppas = lm->smeta_sec;
	rqd.end_io = pblk_end_io_sync;
	rqd.private = &wait;

	for (i = 0; i < lm->smeta_sec; i++, paddr++) {
		struct pblk_sec_meta *meta_list = rqd.meta_list;

		rqd.ppa_list[i] = addr_to_gen_ppa(pblk, paddr, line->id);

		if (dir == WRITE) {
			__le64 addr_empty = cpu_to_le64(ADDR_EMPTY);

			meta_list[i].lba = lba_list[paddr] = addr_empty;
		}
	}

	/*
	 * This I/O is sent by the write thread when a line is replaced. Since
	 * the write thread is the only one sending write and erase commands,
	 * there is no need to take the LUN semaphore.
	 */
	ret = pblk_submit_io(pblk, &rqd);
	if (ret) {
		pr_err("pblk: smeta I/O submission failed: %d\n", ret);
		bio_put(bio);
		goto free_ppa_list;
	}

	if (!wait_for_completion_io_timeout(&wait,
				msecs_to_jiffies(PBLK_COMMAND_TIMEOUT_MS))) {
		pr_err("pblk: smeta I/O timed out\n");
	}
	atomic_dec(&pblk->inflight_io);

	if (rqd.error) {
		if (dir == WRITE)
			pblk_log_write_err(pblk, &rqd);
		else
			pblk_log_read_err(pblk, &rqd);
	}

free_ppa_list:
	nvm_dev_dma_free(dev->parent, rqd.meta_list, rqd.dma_meta_list);

	return ret;
}

int pblk_line_read_smeta(struct pblk *pblk, struct pblk_line *line)
{
	u64 bpaddr = pblk_line_smeta_start(pblk, line);

	return pblk_line_submit_smeta_io(pblk, line, bpaddr, READ);
}

int pblk_line_read_emeta(struct pblk *pblk, struct pblk_line *line,
			 void *emeta_buf)
{
	return pblk_line_submit_emeta_io(pblk, line, emeta_buf,
						line->emeta_ssec, READ);
}

static void pblk_setup_e_rq(struct pblk *pblk, struct nvm_rq *rqd,
			    struct ppa_addr ppa)
{
	rqd->opcode = NVM_OP_ERASE;
	rqd->ppa_addr = ppa;
	rqd->nr_ppas = 1;
	rqd->flags = pblk_set_progr_mode(pblk, ERASE);
	rqd->bio = NULL;
}

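/* Synchronous erase of a single block: submit the erase and wait on an
 * on-stack completion (bounded by PBLK_COMMAND_TIMEOUT_MS), then run the
 * shared erase completion path so that failures are marked as grown bad
 * blocks just as in the asynchronous case.
 */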
static int pblk_blk_erase_sync(struct pblk *pblk, struct ppa_addr ppa)
{
	struct nvm_rq rqd;
	int ret = 0;
	DECLARE_COMPLETION_ONSTACK(wait);

	memset(&rqd, 0, sizeof(struct nvm_rq));

	pblk_setup_e_rq(pblk, &rqd, ppa);

	rqd.end_io = pblk_end_io_sync;
	rqd.private = &wait;

	/* The write thread schedules erases so that it minimizes disturbances
	 * with writes. Thus, there is no need to take the LUN semaphore.
	 */
	ret = pblk_submit_io(pblk, &rqd);
	if (ret) {
		struct nvm_tgt_dev *dev = pblk->dev;
		struct nvm_geo *geo = &dev->geo;

		pr_err("pblk: could not sync erase line:%d,blk:%d\n",
					pblk_dev_ppa_to_line(ppa),
					pblk_dev_ppa_to_pos(geo, ppa));

		rqd.error = ret;
		goto out;
	}

	if (!wait_for_completion_io_timeout(&wait,
				msecs_to_jiffies(PBLK_COMMAND_TIMEOUT_MS))) {
		pr_err("pblk: sync erase timed out\n");
	}

out:
	rqd.private = pblk;
	__pblk_end_io_erase(pblk, &rqd);

	return ret;
}

int pblk_line_erase(struct pblk *pblk, struct pblk_line *line)
{
	struct pblk_line_meta *lm = &pblk->lm;
	struct ppa_addr ppa;
	int ret, bit = -1;

	/* Erase only good blocks, one at a time */
	do {
		spin_lock(&line->lock);
		bit = find_next_zero_bit(line->erase_bitmap, lm->blk_per_line,
								bit + 1);
		if (bit >= lm->blk_per_line) {
			spin_unlock(&line->lock);
			break;
		}

		ppa = pblk->luns[bit].bppa; /* set ch and lun */
		ppa.g.blk = line->id;

		atomic_dec(&line->left_eblks);
		WARN_ON(test_and_set_bit(bit, line->erase_bitmap));
		spin_unlock(&line->lock);

		ret = pblk_blk_erase_sync(pblk, ppa);
		if (ret) {
			pr_err("pblk: failed to erase line %d\n", line->id);
			return ret;
		}
	} while (1);

	return 0;
}

static void pblk_line_setup_metadata(struct pblk_line *line,
				     struct pblk_line_mgmt *l_mg,
				     struct pblk_line_meta *lm)
{
	int meta_line;

	lockdep_assert_held(&l_mg->free_lock);

retry_meta:
	meta_line = find_first_zero_bit(&l_mg->meta_bitmap, PBLK_DATA_LINES);
	if (meta_line == PBLK_DATA_LINES) {
		spin_unlock(&l_mg->free_lock);
		io_schedule();
		spin_lock(&l_mg->free_lock);
		goto retry_meta;
	}

	set_bit(meta_line, &l_mg->meta_bitmap);
	line->meta_line = meta_line;

	line->smeta = l_mg->sline_meta[meta_line];
	line->emeta = l_mg->eline_meta[meta_line];

	memset(line->smeta, 0, lm->smeta_len);
	memset(line->emeta->buf, 0, lm->emeta_len[0]);

	line->emeta->mem = 0;
	atomic_set(&line->emeta->sync, 0);
}

/* For now, lines are always assumed to be full lines. Thus, smeta's former
 * and current lun bitmaps are omitted.
 */
static int pblk_line_init_metadata(struct pblk *pblk, struct pblk_line *line,
				   struct pblk_line *cur)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	struct pblk_line_meta *lm = &pblk->lm;
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	struct pblk_emeta *emeta = line->emeta;
	struct line_emeta *emeta_buf = emeta->buf;
	struct line_smeta *smeta_buf = (struct line_smeta *)line->smeta;
	int nr_blk_line;

	/* After erasing the line, new bad blocks might appear and we risk
	 * having an invalid line
	 */
	nr_blk_line = lm->blk_per_line -
			bitmap_weight(line->blk_bitmap, lm->blk_per_line);
	if (nr_blk_line < lm->min_blk_line) {
		spin_lock(&l_mg->free_lock);
		spin_lock(&line->lock);
		line->state = PBLK_LINESTATE_BAD;
		spin_unlock(&line->lock);

		list_add_tail(&line->list, &l_mg->bad_list);
		spin_unlock(&l_mg->free_lock);

		pr_debug("pblk: line %d is bad\n", line->id);

		return 0;
	}

	/* Run-time metadata */
	line->lun_bitmap = ((void *)(smeta_buf)) + sizeof(struct line_smeta);

	/* Mark LUNs allocated in this line (all for now) */
	bitmap_set(line->lun_bitmap, 0, lm->lun_bitmap_len);

	smeta_buf->header.identifier = cpu_to_le32(PBLK_MAGIC);
	memcpy(smeta_buf->header.uuid, pblk->instance_uuid, 16);
	smeta_buf->header.id = cpu_to_le32(line->id);
	smeta_buf->header.type = cpu_to_le16(line->type);
	smeta_buf->header.version = cpu_to_le16(1);

	/* Start metadata */
	smeta_buf->seq_nr = cpu_to_le64(line->seq_nr);
	smeta_buf->window_wr_lun = cpu_to_le32(geo->nr_luns);

	/* Fill metadata among lines */
	if (cur) {
		memcpy(line->lun_bitmap, cur->lun_bitmap, lm->lun_bitmap_len);
		smeta_buf->prev_id = cpu_to_le32(cur->id);
		cur->emeta->buf->next_id = cpu_to_le32(line->id);
	} else {
		smeta_buf->prev_id = cpu_to_le32(PBLK_LINE_EMPTY);
	}

	/* All smeta must be set at this point */
	smeta_buf->header.crc = cpu_to_le32(
			pblk_calc_meta_header_crc(pblk, &smeta_buf->header));
	smeta_buf->crc = cpu_to_le32(pblk_calc_smeta_crc(pblk, smeta_buf));

	/* End metadata */
	memcpy(&emeta_buf->header, &smeta_buf->header,
						sizeof(struct line_header));
	emeta_buf->seq_nr = cpu_to_le64(line->seq_nr);
	emeta_buf->nr_lbas = cpu_to_le64(line->sec_in_line);
	emeta_buf->nr_valid_lbas = cpu_to_le64(0);
	emeta_buf->next_id = cpu_to_le32(PBLK_LINE_EMPTY);
	emeta_buf->crc = cpu_to_le32(0);
	emeta_buf->prev_id = smeta_buf->prev_id;

	return 1;
}

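/* Lay out a line's mapping bitmaps: fold bad blocks into map_bitmap,
 * reserve smeta at the first good block and emeta at the tail of the
 * line (shifted to skip bad blocks), and derive sec_in_line, left_msecs
 * and the initial vsc from the sectors that remain.
 */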
/* For now, lines are always assumed to be full lines. Thus, smeta's former
 * and current lun bitmaps are omitted.
 */
static int pblk_line_init_bb(struct pblk *pblk, struct pblk_line *line,
			     int init)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	struct pblk_line_meta *lm = &pblk->lm;
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	int nr_bb = 0;
	u64 off;
	int bit = -1;

	line->sec_in_line = lm->sec_per_line;

	/* Capture bad block information on line mapping bitmaps */
	while ((bit = find_next_bit(line->blk_bitmap, lm->blk_per_line,
					bit + 1)) < lm->blk_per_line) {
		off = bit * geo->sec_per_pl;
		bitmap_shift_left(l_mg->bb_aux, l_mg->bb_template, off,
							lm->sec_per_line);
		bitmap_or(line->map_bitmap, line->map_bitmap, l_mg->bb_aux,
							lm->sec_per_line);
		line->sec_in_line -= geo->sec_per_blk;
		if (bit >= lm->emeta_bb)
			nr_bb++;
	}

	/* Mark smeta metadata sectors as bad sectors */
	bit = find_first_zero_bit(line->blk_bitmap, lm->blk_per_line);
	off = bit * geo->sec_per_pl;
	bitmap_set(line->map_bitmap, off, lm->smeta_sec);
	line->sec_in_line -= lm->smeta_sec;
	line->smeta_ssec = off;
	line->cur_sec = off + lm->smeta_sec;

	if (init && pblk_line_submit_smeta_io(pblk, line, off, WRITE)) {
		pr_debug("pblk: line smeta I/O failed. Retry\n");
		return 1;
	}

	bitmap_copy(line->invalid_bitmap, line->map_bitmap, lm->sec_per_line);

	/* Mark emeta metadata sectors as bad sectors. We need to consider bad
	 * blocks to make sure that there are enough sectors to store emeta
	 */
	bit = lm->sec_per_line;
	off = lm->sec_per_line - lm->emeta_sec[0];
	bitmap_set(line->invalid_bitmap, off, lm->emeta_sec[0]);
	while (nr_bb) {
		off -= geo->sec_per_pl;
		if (!test_bit(off, line->invalid_bitmap)) {
			bitmap_set(line->invalid_bitmap, off, geo->sec_per_pl);
			nr_bb--;
		}
	}

	line->sec_in_line -= lm->emeta_sec[0];
	line->emeta_ssec = off;
	line->nr_valid_lbas = 0;
	line->left_msecs = line->sec_in_line;
	*line->vsc = cpu_to_le32(line->sec_in_line);

	if (lm->sec_per_line - line->sec_in_line !=
		bitmap_weight(line->invalid_bitmap, lm->sec_per_line)) {
		spin_lock(&line->lock);
		line->state = PBLK_LINESTATE_BAD;
		spin_unlock(&line->lock);

		list_add_tail(&line->list, &l_mg->bad_list);
		pr_err("pblk: unexpected line %d is bad\n", line->id);

		return 0;
	}

	return 1;
}

static int pblk_line_prepare(struct pblk *pblk, struct pblk_line *line)
{
	struct pblk_line_meta *lm = &pblk->lm;
	int blk_in_line = atomic_read(&line->blk_in_line);

	line->map_bitmap = mempool_alloc(pblk->line_meta_pool, GFP_ATOMIC);
	if (!line->map_bitmap)
		return -ENOMEM;
	memset(line->map_bitmap, 0, lm->sec_bitmap_len);

	/* invalid_bitmap is special since it is used when the line is closed.
	 * There is no need to zero it; it will be initialized using bad block
	 * info from map_bitmap.
	 */
	line->invalid_bitmap = mempool_alloc(pblk->line_meta_pool, GFP_ATOMIC);
	if (!line->invalid_bitmap) {
		mempool_free(line->map_bitmap, pblk->line_meta_pool);
		return -ENOMEM;
	}

	spin_lock(&line->lock);
	if (line->state != PBLK_LINESTATE_FREE) {
		mempool_free(line->invalid_bitmap, pblk->line_meta_pool);
		mempool_free(line->map_bitmap, pblk->line_meta_pool);
		spin_unlock(&line->lock);
		WARN(1, "pblk: corrupted line %d, state %d\n",
							line->id, line->state);
		return -EAGAIN;
	}

	line->state = PBLK_LINESTATE_OPEN;

	atomic_set(&line->left_eblks, blk_in_line);
	atomic_set(&line->left_seblks, blk_in_line);

	line->meta_distance = lm->meta_distance;
	spin_unlock(&line->lock);

	/* Bad blocks do not need to be erased */
	bitmap_copy(line->erase_bitmap, line->blk_bitmap, lm->blk_per_line);

	kref_init(&line->ref);

	return 0;
}

int pblk_line_recov_alloc(struct pblk *pblk, struct pblk_line *line)
{
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	int ret;

	spin_lock(&l_mg->free_lock);
	l_mg->data_line = line;
	list_del(&line->list);

	ret = pblk_line_prepare(pblk, line);
	if (ret) {
		list_add(&line->list, &l_mg->free_list);
		spin_unlock(&l_mg->free_lock);
		return ret;
	}
	spin_unlock(&l_mg->free_lock);

	pblk_rl_free_lines_dec(&pblk->rl, line);

	if (!pblk_line_init_bb(pblk, line, 0)) {
		list_add(&line->list, &l_mg->free_list);
		return -EINTR;
	}

	return 0;
}

void pblk_line_recov_close(struct pblk *pblk, struct pblk_line *line)
{
	mempool_free(line->map_bitmap, pblk->line_meta_pool);
	line->map_bitmap = NULL;
	line->smeta = NULL;
	line->emeta = NULL;
}

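/* Take the next line off the free list and prepare it for writing. Fully
 * bad lines are parked on the bad list and lines failing preparation with
 * -EAGAIN on the corrupt list, retrying with the next free line; NULL is
 * returned when no usable line remains.
 */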
struct pblk_line *pblk_line_get(struct pblk *pblk)
{
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	struct pblk_line_meta *lm = &pblk->lm;
	struct pblk_line *line;
	int ret, bit;

	lockdep_assert_held(&l_mg->free_lock);

retry:
	if (list_empty(&l_mg->free_list)) {
		pr_err("pblk: no free lines\n");
		return NULL;
	}

	line = list_first_entry(&l_mg->free_list, struct pblk_line, list);
	list_del(&line->list);
	l_mg->nr_free_lines--;

	bit = find_first_zero_bit(line->blk_bitmap, lm->blk_per_line);
	if (unlikely(bit >= lm->blk_per_line)) {
		spin_lock(&line->lock);
		line->state = PBLK_LINESTATE_BAD;
		spin_unlock(&line->lock);

		list_add_tail(&line->list, &l_mg->bad_list);

		pr_debug("pblk: line %d is bad\n", line->id);
		goto retry;
	}

	ret = pblk_line_prepare(pblk, line);
	if (ret) {
		if (ret == -EAGAIN) {
			list_add(&line->list, &l_mg->corrupt_list);
			goto retry;
		} else {
			pr_err("pblk: failed to prepare line %d\n", line->id);
			list_add(&line->list, &l_mg->free_list);
			l_mg->nr_free_lines++;
			return NULL;
		}
	}

	return line;
}

static struct pblk_line *pblk_line_retry(struct pblk *pblk,
					 struct pblk_line *line)
{
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	struct pblk_line *retry_line;

retry:
	spin_lock(&l_mg->free_lock);
	retry_line = pblk_line_get(pblk);
	if (!retry_line) {
		l_mg->data_line = NULL;
		spin_unlock(&l_mg->free_lock);
		return NULL;
	}

	retry_line->smeta = line->smeta;
	retry_line->emeta = line->emeta;
	retry_line->meta_line = line->meta_line;

	pblk_line_free(pblk, line);
	l_mg->data_line = retry_line;
	spin_unlock(&l_mg->free_lock);

	pblk_rl_free_lines_dec(&pblk->rl, retry_line);

	if (pblk_line_erase(pblk, retry_line))
		goto retry;

	return retry_line;
}

static void pblk_set_space_limit(struct pblk *pblk)
{
	struct pblk_rl *rl = &pblk->rl;

	atomic_set(&rl->rb_space, 0);
}

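/* Set up the first data line when the target is brought up: grab and
 * erase a free line, attach line metadata, and pre-allocate the next data
 * line. If no next line is available, incoming writes are limited to what
 * can still be stored safely.
 */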
struct pblk_line *pblk_line_get_first_data(struct pblk *pblk)
{
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	struct pblk_line *line;
	int is_next = 0;

	spin_lock(&l_mg->free_lock);
	line = pblk_line_get(pblk);
	if (!line) {
		spin_unlock(&l_mg->free_lock);
		return NULL;
	}

	line->seq_nr = l_mg->d_seq_nr++;
	line->type = PBLK_LINETYPE_DATA;
	l_mg->data_line = line;

	pblk_line_setup_metadata(line, l_mg, &pblk->lm);

	/* Allocate next line for preparation */
	l_mg->data_next = pblk_line_get(pblk);
	if (!l_mg->data_next) {
		/* If we cannot get a new line, we need to stop the pipeline.
		 * Only allow as many writes in as we can store safely and then
		 * fail gracefully
		 */
		pblk_set_space_limit(pblk);

		l_mg->data_next = NULL;
	} else {
		l_mg->data_next->seq_nr = l_mg->d_seq_nr++;
		l_mg->data_next->type = PBLK_LINETYPE_DATA;
		is_next = 1;
	}
	spin_unlock(&l_mg->free_lock);

	if (pblk_line_erase(pblk, line)) {
		line = pblk_line_retry(pblk, line);
		if (!line)
			return NULL;
	}

	pblk_rl_free_lines_dec(&pblk->rl, line);
	if (is_next)
		pblk_rl_free_lines_dec(&pblk->rl, l_mg->data_next);

retry_setup:
	if (!pblk_line_init_metadata(pblk, line, NULL)) {
		line = pblk_line_retry(pblk, line);
		if (!line)
			return NULL;

		goto retry_setup;
	}

	if (!pblk_line_init_bb(pblk, line, 1)) {
		line = pblk_line_retry(pblk, line);
		if (!line)
			return NULL;

		goto retry_setup;
	}

	return line;
}

static void pblk_stop_writes(struct pblk *pblk, struct pblk_line *line)
{
	lockdep_assert_held(&pblk->l_mg.free_lock);

	pblk_set_space_limit(pblk);
	pblk->state = PBLK_STATE_STOPPING;
}

void pblk_pipeline_stop(struct pblk *pblk)
{
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	int ret;

	spin_lock(&l_mg->free_lock);
	if (pblk->state == PBLK_STATE_RECOVERING ||
					pblk->state == PBLK_STATE_STOPPED) {
		spin_unlock(&l_mg->free_lock);
		return;
	}
	pblk->state = PBLK_STATE_RECOVERING;
	spin_unlock(&l_mg->free_lock);

	pblk_flush_writer(pblk);
	pblk_wait_for_meta(pblk);

	ret = pblk_recov_pad(pblk);
	if (ret) {
		pr_err("pblk: could not close data on teardown(%d)\n", ret);
		return;
	}

	flush_workqueue(pblk->bb_wq);
	pblk_line_close_meta_sync(pblk);

	spin_lock(&l_mg->free_lock);
	pblk->state = PBLK_STATE_STOPPED;
	l_mg->data_line = NULL;
	l_mg->data_next = NULL;
	spin_unlock(&l_mg->free_lock);
}

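/* Swap the current data line for the pre-allocated next one. The new line
 * may still have outstanding erases (left_seblks), in which case we erase
 * or wait until it is fully erased before wiring up its metadata and
 * allocating a replacement next line.
 */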
void pblk_line_replace_data(struct pblk *pblk)
{
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	struct pblk_line *cur, *new;
	unsigned int left_seblks;
	int is_next = 0;

	cur = l_mg->data_line;
	new = l_mg->data_next;
	if (!new)
		return;
	l_mg->data_line = new;

	spin_lock(&l_mg->free_lock);
	if (pblk->state != PBLK_STATE_RUNNING) {
		l_mg->data_line = NULL;
		l_mg->data_next = NULL;
		spin_unlock(&l_mg->free_lock);
		return;
	}

	pblk_line_setup_metadata(new, l_mg, &pblk->lm);
	spin_unlock(&l_mg->free_lock);

retry_erase:
	left_seblks = atomic_read(&new->left_seblks);
	if (left_seblks) {
		/* If line is not fully erased, erase it */
		if (atomic_read(&new->left_eblks)) {
			if (pblk_line_erase(pblk, new))
				return;
		} else {
			io_schedule();
		}
		goto retry_erase;
	}

retry_setup:
	if (!pblk_line_init_metadata(pblk, new, cur)) {
		new = pblk_line_retry(pblk, new);
		if (!new)
			return;

		goto retry_setup;
	}

	if (!pblk_line_init_bb(pblk, new, 1)) {
		new = pblk_line_retry(pblk, new);
		if (!new)
			return;

		goto retry_setup;
	}

	/* Allocate next line for preparation */
	spin_lock(&l_mg->free_lock);
	l_mg->data_next = pblk_line_get(pblk);
	if (!l_mg->data_next) {
		/* If we cannot get a new line, we need to stop the pipeline.
		 * Only allow as many writes in as we can store safely and then
		 * fail gracefully
		 */
		pblk_stop_writes(pblk, new);
		l_mg->data_next = NULL;
	} else {
		l_mg->data_next->seq_nr = l_mg->d_seq_nr++;
		l_mg->data_next->type = PBLK_LINETYPE_DATA;
		is_next = 1;
	}
	spin_unlock(&l_mg->free_lock);

	if (is_next)
		pblk_rl_free_lines_dec(&pblk->rl, l_mg->data_next);
}

void pblk_line_free(struct pblk *pblk, struct pblk_line *line)
{
	if (line->map_bitmap)
		mempool_free(line->map_bitmap, pblk->line_meta_pool);
	if (line->invalid_bitmap)
		mempool_free(line->invalid_bitmap, pblk->line_meta_pool);

	*line->vsc = cpu_to_le32(EMPTY_ENTRY);

	line->map_bitmap = NULL;
	line->invalid_bitmap = NULL;
	line->smeta = NULL;
	line->emeta = NULL;
}

void pblk_line_put(struct kref *ref)
{
	struct pblk_line *line = container_of(ref, struct pblk_line, ref);
	struct pblk *pblk = line->pblk;
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;

	spin_lock(&line->lock);
	WARN_ON(line->state != PBLK_LINESTATE_GC);
	line->state = PBLK_LINESTATE_FREE;
	line->gc_group = PBLK_LINEGC_NONE;
	pblk_line_free(pblk, line);
	spin_unlock(&line->lock);

	spin_lock(&l_mg->free_lock);
	list_add_tail(&line->list, &l_mg->free_list);
	l_mg->nr_free_lines++;
	spin_unlock(&l_mg->free_lock);

	pblk_rl_free_lines_inc(&pblk->rl, line);
}

int pblk_blk_erase_async(struct pblk *pblk, struct ppa_addr ppa)
{
	struct nvm_rq *rqd;
	int err;

	rqd = mempool_alloc(pblk->g_rq_pool, GFP_KERNEL);
	memset(rqd, 0, pblk_g_rq_size);

	pblk_setup_e_rq(pblk, rqd, ppa);

	rqd->end_io = pblk_end_io_erase;
	rqd->private = pblk;

	/* The write thread schedules erases so that it minimizes disturbances
	 * with writes. Thus, there is no need to take the LUN semaphore.
	 */
	err = pblk_submit_io(pblk, rqd);
	if (err) {
		struct nvm_tgt_dev *dev = pblk->dev;
		struct nvm_geo *geo = &dev->geo;

		pr_err("pblk: could not async erase line:%d,blk:%d\n",
					pblk_dev_ppa_to_line(ppa),
					pblk_dev_ppa_to_pos(geo, ppa));
	}

	return err;
}

struct pblk_line *pblk_line_get_data(struct pblk *pblk)
{
	return pblk->l_mg.data_line;
}

/* For now, always erase next line */
struct pblk_line *pblk_line_get_erase(struct pblk *pblk)
{
	return pblk->l_mg.data_next;
}

int pblk_line_is_full(struct pblk_line *line)
{
	return (line->left_msecs == 0);
}

void pblk_line_close_meta_sync(struct pblk *pblk)
{
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	struct pblk_line_meta *lm = &pblk->lm;
	struct pblk_line *line, *tline;
	LIST_HEAD(list);

	spin_lock(&l_mg->close_lock);
	if (list_empty(&l_mg->emeta_list)) {
		spin_unlock(&l_mg->close_lock);
		return;
	}

	list_cut_position(&list, &l_mg->emeta_list, l_mg->emeta_list.prev);
	spin_unlock(&l_mg->close_lock);

	list_for_each_entry_safe(line, tline, &list, list) {
		struct pblk_emeta *emeta = line->emeta;

		while (emeta->mem < lm->emeta_len[0]) {
			int ret;

			ret = pblk_submit_meta_io(pblk, line);
			if (ret) {
				pr_err("pblk: sync meta line %d failed (%d)\n",
							line->id, ret);
				return;
			}
		}
	}

	pblk_wait_for_meta(pblk);
	flush_workqueue(pblk->close_wq);
}

static void pblk_line_should_sync_meta(struct pblk *pblk)
{
	if (pblk_rl_is_limit(&pblk->rl))
		pblk_line_close_meta_sync(pblk);
}

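/* Close a fully written line: release its meta slot, move the line onto
 * the GC list matching its valid sector count, and drop the map bitmap,
 * which is no longer needed once the line stops taking writes.
 */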
void pblk_line_close(struct pblk *pblk, struct pblk_line *line)
{
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	struct pblk_line_meta *lm = &pblk->lm;
	struct list_head *move_list;

	WARN(!bitmap_full(line->map_bitmap, lm->sec_per_line),
				"pblk: corrupt closed line %d\n", line->id);

	spin_lock(&l_mg->free_lock);
	WARN_ON(!test_and_clear_bit(line->meta_line, &l_mg->meta_bitmap));
	spin_unlock(&l_mg->free_lock);

	spin_lock(&l_mg->gc_lock);
	spin_lock(&line->lock);
	WARN_ON(line->state != PBLK_LINESTATE_OPEN);
	line->state = PBLK_LINESTATE_CLOSED;
	move_list = pblk_line_gc_list(pblk, line);

	list_add_tail(&line->list, move_list);

	mempool_free(line->map_bitmap, pblk->line_meta_pool);
	line->map_bitmap = NULL;
	line->smeta = NULL;
	line->emeta = NULL;

	spin_unlock(&line->lock);
	spin_unlock(&l_mg->gc_lock);

	pblk_gc_should_kick(pblk);
}

void pblk_line_close_meta(struct pblk *pblk, struct pblk_line *line)
{
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	struct pblk_line_meta *lm = &pblk->lm;
	struct pblk_emeta *emeta = line->emeta;
	struct line_emeta *emeta_buf = emeta->buf;

	/* No need for an exact vsc value; avoid a big line lock and take an
	 * approximate value instead.
	 */
	memcpy(emeta_to_vsc(pblk, emeta_buf), l_mg->vsc_list, lm->vsc_list_len);
	memcpy(emeta_to_bb(emeta_buf), line->blk_bitmap, lm->blk_bitmap_len);

	emeta_buf->nr_valid_lbas = cpu_to_le64(line->nr_valid_lbas);
	emeta_buf->crc = cpu_to_le32(pblk_calc_emeta_crc(pblk, emeta_buf));

	spin_lock(&l_mg->close_lock);
	spin_lock(&line->lock);
	list_add_tail(&line->list, &l_mg->emeta_list);
	spin_unlock(&line->lock);
	spin_unlock(&l_mg->close_lock);

	pblk_line_should_sync_meta(pblk);
}

void pblk_line_close_ws(struct work_struct *work)
{
	struct pblk_line_ws *line_ws = container_of(work, struct pblk_line_ws,
									ws);
	struct pblk *pblk = line_ws->pblk;
	struct pblk_line *line = line_ws->line;

	pblk_line_close(pblk, line);
	mempool_free(line_ws, pblk->line_ws_pool);
}

void pblk_line_mark_bb(struct work_struct *work)
{
	struct pblk_line_ws *line_ws = container_of(work, struct pblk_line_ws,
									ws);
	struct pblk *pblk = line_ws->pblk;
	struct nvm_tgt_dev *dev = pblk->dev;
	struct ppa_addr *ppa = line_ws->priv;
	int ret;

	ret = nvm_set_tgt_bb_tbl(dev, ppa, 1, NVM_BLK_T_GRWN_BAD);
	if (ret) {
		struct pblk_line *line;
		int pos;

		line = &pblk->lines[pblk_dev_ppa_to_line(*ppa)];
		pos = pblk_dev_ppa_to_pos(&dev->geo, *ppa);

		pr_err("pblk: failed to mark bb, line:%d, pos:%d\n",
				line->id, pos);
	}

	kfree(ppa);
	mempool_free(line_ws, pblk->line_ws_pool);
}

void pblk_line_run_ws(struct pblk *pblk, struct pblk_line *line, void *priv,
		      void (*work)(struct work_struct *),
		      struct workqueue_struct *wq)
{
	struct pblk_line_ws *line_ws;

	line_ws = mempool_alloc(pblk->line_ws_pool, GFP_ATOMIC);
	if (!line_ws)
		return;

	line_ws->pblk = pblk;
	line_ws->line = line;
	line_ws->priv = priv;

	INIT_WORK(&line_ws->ws, work);
	queue_work(wq, &line_ws->ws);
}

void pblk_down_rq(struct pblk *pblk, struct ppa_addr *ppa_list, int nr_ppas,
		  unsigned long *lun_bitmap)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	struct pblk_lun *rlun;
	int pos = pblk_ppa_to_pos(geo, ppa_list[0]);
	int ret;

	/*
	 * Only send one inflight I/O per LUN. Since we map at a page
	 * granularity, all ppas in the I/O will map to the same LUN
	 */
#ifdef CONFIG_NVM_DEBUG
	int i;

	for (i = 1; i < nr_ppas; i++)
		WARN_ON(ppa_list[0].g.lun != ppa_list[i].g.lun ||
				ppa_list[0].g.ch != ppa_list[i].g.ch);
#endif
	/* If the LUN has been locked for this same request, do not attempt
	 * to lock it again
	 */
	if (test_and_set_bit(pos, lun_bitmap))
		return;

	rlun = &pblk->luns[pos];
	ret = down_timeout(&rlun->wr_sem, msecs_to_jiffies(5000));
	if (ret) {
		switch (ret) {
		case -ETIME:
			pr_err("pblk: lun semaphore timed out\n");
			break;
		case -EINTR:
1704 pr_err("pblk: lun semaphore timed out\n");
			break;
		}
	}
}

void pblk_up_rq(struct pblk *pblk, struct ppa_addr *ppa_list, int nr_ppas,
		unsigned long *lun_bitmap)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	struct pblk_lun *rlun;
	int nr_luns = geo->nr_luns;
	int bit = -1;

	while ((bit = find_next_bit(lun_bitmap, nr_luns, bit + 1)) < nr_luns) {
		rlun = &pblk->luns[bit];
		up(&rlun->wr_sem);
	}

	kfree(lun_bitmap);
}

void pblk_update_map(struct pblk *pblk, sector_t lba, struct ppa_addr ppa)
{
	struct ppa_addr l2p_ppa;

	/* logic error: lba out-of-bounds. Ignore update */
	if (!(lba < pblk->rl.nr_secs)) {
		WARN(1, "pblk: corrupted L2P map request\n");
		return;
	}

	spin_lock(&pblk->trans_lock);
	l2p_ppa = pblk_trans_map_get(pblk, lba);

	if (!pblk_addr_in_cache(l2p_ppa) && !pblk_ppa_empty(l2p_ppa))
		pblk_map_invalidate(pblk, l2p_ppa);

	pblk_trans_map_set(pblk, lba, ppa);
	spin_unlock(&pblk->trans_lock);
}

void pblk_update_map_cache(struct pblk *pblk, sector_t lba, struct ppa_addr ppa)
{
#ifdef CONFIG_NVM_DEBUG
	/* Callers must ensure that the ppa points to a cache address */
	BUG_ON(!pblk_addr_in_cache(ppa));
	BUG_ON(pblk_rb_pos_oob(&pblk->rwb, pblk_addr_to_cacheline(ppa)));
#endif

	pblk_update_map(pblk, lba, ppa);
}

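/* L2P update for sectors rewritten by GC. Only take the new mapping if
 * the entry still points to the line being collected; if the host has
 * since rewritten the sector (entry in cache, empty, or on another line),
 * the GC copy is stale and must not clobber it. Returns 1 if the map was
 * updated, 0 otherwise.
 */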
int pblk_update_map_gc(struct pblk *pblk, sector_t lba, struct ppa_addr ppa,
		       struct pblk_line *gc_line)
{
	struct ppa_addr l2p_ppa;
	int ret = 1;

#ifdef CONFIG_NVM_DEBUG
	/* Callers must ensure that the ppa points to a cache address */
	BUG_ON(!pblk_addr_in_cache(ppa));
	BUG_ON(pblk_rb_pos_oob(&pblk->rwb, pblk_addr_to_cacheline(ppa)));
#endif

	/* logic error: lba out-of-bounds. Ignore update */
	if (!(lba < pblk->rl.nr_secs)) {
		WARN(1, "pblk: corrupted L2P map request\n");
		return 0;
	}

	spin_lock(&pblk->trans_lock);
	l2p_ppa = pblk_trans_map_get(pblk, lba);

	/* Prevent updated entries from being overwritten by GC */
	if (pblk_addr_in_cache(l2p_ppa) || pblk_ppa_empty(l2p_ppa) ||
				pblk_tgt_ppa_to_line(l2p_ppa) != gc_line->id) {
		ret = 0;
		goto out;
	}

	pblk_trans_map_set(pblk, lba, ppa);
out:
	spin_unlock(&pblk->trans_lock);
	return ret;
}

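/* L2P update when a cached entry is written out to the device. The new
 * mapping is only installed if the L2P entry still points to the
 * cacheline that was just persisted (entry_line); otherwise the cache
 * entry was updated in the meantime and the freshly written ppa is
 * invalidated instead.
 */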
void pblk_update_map_dev(struct pblk *pblk, sector_t lba, struct ppa_addr ppa,
			 struct ppa_addr entry_line)
{
	struct ppa_addr l2p_line;

#ifdef CONFIG_NVM_DEBUG
	/* Callers must ensure that the ppa points to a device address */
	BUG_ON(pblk_addr_in_cache(ppa));
#endif
	/* Invalidate and discard padded entries */
	if (lba == ADDR_EMPTY) {
#ifdef CONFIG_NVM_DEBUG
		atomic_long_inc(&pblk->padded_wb);
#endif
		pblk_map_invalidate(pblk, ppa);
		return;
	}

	/* logic error: lba out-of-bounds. Ignore update */
	if (!(lba < pblk->rl.nr_secs)) {
		WARN(1, "pblk: corrupted L2P map request\n");
		return;
	}

	spin_lock(&pblk->trans_lock);
	l2p_line = pblk_trans_map_get(pblk, lba);

	/* Do not update L2P if the cacheline has been updated. In this case,
	 * the mapped ppa must be invalidated
	 */
	if (l2p_line.ppa != entry_line.ppa) {
		if (!pblk_ppa_empty(ppa))
			pblk_map_invalidate(pblk, ppa);
		goto out;
	}

#ifdef CONFIG_NVM_DEBUG
	WARN_ON(!pblk_addr_in_cache(l2p_line) && !pblk_ppa_empty(l2p_line));
#endif

	pblk_trans_map_set(pblk, lba, ppa);
out:
	spin_unlock(&pblk->trans_lock);
}

void pblk_lookup_l2p_seq(struct pblk *pblk, struct ppa_addr *ppas,
			 sector_t blba, int nr_secs)
{
	int i;

	spin_lock(&pblk->trans_lock);
	for (i = 0; i < nr_secs; i++)
		ppas[i] = pblk_trans_map_get(pblk, blba + i);
	spin_unlock(&pblk->trans_lock);
}

void pblk_lookup_l2p_rand(struct pblk *pblk, struct ppa_addr *ppas,
			  u64 *lba_list, int nr_secs)
{
	sector_t lba;
	int i;

	spin_lock(&pblk->trans_lock);
	for (i = 0; i < nr_secs; i++) {
		lba = lba_list[i];
		if (lba == ADDR_EMPTY) {
			ppas[i].ppa = ADDR_EMPTY;
		} else {
			/* logic error: lba out-of-bounds. Ignore update */
			if (!(lba < pblk->rl.nr_secs)) {
				WARN(1, "pblk: corrupted L2P map request\n");
				continue;
			}
			ppas[i] = pblk_trans_map_get(pblk, lba);
		}
	}
	spin_unlock(&pblk->trans_lock);
}