// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2016 CNEX Labs
 * Initial release: Javier Gonzalez <javier@cnexlabs.com>
 *                  Matias Bjorling <matias@cnexlabs.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * pblk-read.c - pblk's read path
 */

#include "pblk.h"

/*
 * There is no guarantee that the value read from the cache has not been
 * updated and now resides at another location in the cache. We do guarantee
 * though that if the value is read from the cache, it belongs to the mapped
 * lba. In order to guarantee that writes and reads are ordered, a flush
 * must be issued.
 */
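/*
 * Illustrative sketch (not part of the driver logic): without a flush, a
 * read racing with a buffered write may legally observe either version of
 * the data:
 *
 *	write(lba);		// lands in the write buffer (rwb)
 *	read(lba);		// may see old or new data
 *
 *	write(lba);
 *	flush();		// orders the write against later reads
 *	read(lba);		// guaranteed to see the new data
 */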
static int pblk_read_from_cache(struct pblk *pblk, struct bio *bio,
                                sector_t lba, struct ppa_addr ppa,
                                int bio_iter, bool advanced_bio)
{
#ifdef CONFIG_NVM_PBLK_DEBUG
        /* Callers must ensure that the ppa points to a cache address */
        BUG_ON(pblk_ppa_empty(ppa));
        BUG_ON(!pblk_addr_in_cache(ppa));
#endif

        return pblk_rb_copy_to_bio(&pblk->rwb, bio, lba, ppa,
                                                bio_iter, advanced_bio);
}

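/*
 * Build the ppa list for a multi-sector read. Bits set in @read_bitmap mark
 * sectors that are already resolved (served from the write buffer or mapped
 * to an empty lba); the remaining sectors are packed into rqd->ppa_list and
 * read from media. For example, for a 4-sector request where sectors 0 and
 * 2 hit the cache, read_bitmap ends up as 0101b and rqd->ppa_list holds the
 * two media addresses of sectors 1 and 3.
 */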
static void pblk_read_ppalist_rq(struct pblk *pblk, struct nvm_rq *rqd,
                                 struct bio *bio, sector_t blba,
                                 unsigned long *read_bitmap)
{
        struct pblk_sec_meta *meta_list = rqd->meta_list;
        struct ppa_addr ppas[NVM_MAX_VLBA];
        int nr_secs = rqd->nr_ppas;
        bool advanced_bio = false;
        int i, j = 0;

        pblk_lookup_l2p_seq(pblk, ppas, blba, nr_secs);

        for (i = 0; i < nr_secs; i++) {
                struct ppa_addr p = ppas[i];
                sector_t lba = blba + i;

retry:
                if (pblk_ppa_empty(p)) {
                        WARN_ON(test_and_set_bit(i, read_bitmap));
                        meta_list[i].lba = cpu_to_le64(ADDR_EMPTY);

                        if (unlikely(!advanced_bio)) {
                                bio_advance(bio, i * PBLK_EXPOSED_PAGE_SIZE);
                                advanced_bio = true;
                        }

                        goto next;
                }

                /* Try to read from write buffer. The address is later checked
                 * on the write buffer to prevent retrieving overwritten data.
                 */
                if (pblk_addr_in_cache(p)) {
                        if (!pblk_read_from_cache(pblk, bio, lba, p, i,
                                                                advanced_bio)) {
                                pblk_lookup_l2p_seq(pblk, &p, lba, 1);
                                goto retry;
                        }
                        WARN_ON(test_and_set_bit(i, read_bitmap));
                        meta_list[i].lba = cpu_to_le64(lba);
                        advanced_bio = true;
#ifdef CONFIG_NVM_PBLK_DEBUG
                        atomic_long_inc(&pblk->cache_reads);
#endif
                } else {
                        /* Read this non-cached sector from media */
                        rqd->ppa_list[j++] = p;
                }

next:
                if (advanced_bio)
                        bio_advance(bio, PBLK_EXPOSED_PAGE_SIZE);
        }

        if (pblk_io_aligned(pblk, nr_secs))
                rqd->is_seq = 1;

#ifdef CONFIG_NVM_PBLK_DEBUG
        atomic_long_add(nr_secs, &pblk->inflight_reads);
#endif
}

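/*
 * Sanity-check a sequential read: the lba stored in each sector's
 * out-of-band metadata must match the lba the sector was read for. A
 * mismatch indicates a corrupted mapping and is only reported, not
 * repaired.
 */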
static void pblk_read_check_seq(struct pblk *pblk, struct nvm_rq *rqd,
                                sector_t blba)
{
        struct pblk_sec_meta *meta_lba_list = rqd->meta_list;
        int nr_lbas = rqd->nr_ppas;
        int i;

        for (i = 0; i < nr_lbas; i++) {
                u64 lba = le64_to_cpu(meta_lba_list[i].lba);

                if (lba == ADDR_EMPTY)
                        continue;

                if (lba != blba + i) {
#ifdef CONFIG_NVM_PBLK_DEBUG
                        struct ppa_addr *ppa_list = nvm_rq_to_ppa_list(rqd);

                        print_ppa(pblk, &ppa_list[i], "seq", i);
#endif
                        pblk_err(pblk, "corrupted read LBA (%llu/%llu)\n",
                                                        lba, (u64)blba + i);
                        WARN_ON(1);
                }
        }
}

/*
 * There can be holes in the lba list (entries set to ADDR_EMPTY): the
 * sector metadata only covers the sectors that were actually read, hence
 * the separate metadata index j.
 */
static void pblk_read_check_rand(struct pblk *pblk, struct nvm_rq *rqd,
                                 u64 *lba_list, int nr_lbas)
{
        struct pblk_sec_meta *meta_lba_list = rqd->meta_list;
        int i, j;

        for (i = 0, j = 0; i < nr_lbas; i++) {
                u64 lba = lba_list[i];
                u64 meta_lba;

                if (lba == ADDR_EMPTY)
                        continue;

                meta_lba = le64_to_cpu(meta_lba_list[j].lba);

                if (lba != meta_lba) {
#ifdef CONFIG_NVM_PBLK_DEBUG
                        struct ppa_addr *ppa_list = nvm_rq_to_ppa_list(rqd);

                        print_ppa(pblk, &ppa_list[j], "rnd", j);
#endif
                        pblk_err(pblk, "corrupted read LBA (%llu/%llu)\n",
                                                        meta_lba, lba);
                        WARN_ON(1);
                }

                j++;
        }

        WARN_ONCE(j != rqd->nr_ppas, "pblk: corrupted random request\n");
}

static void pblk_end_user_read(struct bio *bio)
{
#ifdef CONFIG_NVM_PBLK_DEBUG
        WARN_ONCE(bio->bi_status, "pblk: corrupted read bio\n");
#endif
        bio_endio(bio);
}

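/*
 * Common completion path for user and partial reads: account the I/O,
 * verify the lba metadata, drop the internal bio and the line reference
 * (if requested) and release the request.
 */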
static void __pblk_end_io_read(struct pblk *pblk, struct nvm_rq *rqd,
                               bool put_line)
{
        struct nvm_tgt_dev *dev = pblk->dev;
        struct pblk_g_ctx *r_ctx = nvm_rq_to_pdu(rqd);
        struct bio *int_bio = rqd->bio;
        unsigned long start_time = r_ctx->start_time;

        generic_end_io_acct(dev->q, REQ_OP_READ, &pblk->disk->part0, start_time);

        if (rqd->error)
                pblk_log_read_err(pblk, rqd);

        pblk_read_check_seq(pblk, rqd, r_ctx->lba);

        if (int_bio)
                bio_put(int_bio);

        if (put_line)
                pblk_rq_to_line_put(pblk, rqd);

#ifdef CONFIG_NVM_PBLK_DEBUG
        atomic_long_add(rqd->nr_ppas, &pblk->sync_reads);
        atomic_long_sub(rqd->nr_ppas, &pblk->inflight_reads);
#endif

        pblk_free_rqd(pblk, rqd, PBLK_READ);
        atomic_dec(&pblk->inflight_io);
}

static void pblk_end_io_read(struct nvm_rq *rqd)
{
        struct pblk *pblk = rqd->private;
        struct pblk_g_ctx *r_ctx = nvm_rq_to_pdu(rqd);
        struct bio *bio = (struct bio *)r_ctx->private;

        pblk_end_user_read(bio);
        __pblk_end_io_read(pblk, rqd, true);
}

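/*
 * Completion of a partial read: the sectors that had to come from media
 * were read into a separate internal bio (one page per hole). Copy each
 * page back into the matching hole of the original bio, restore the sector
 * metadata saved at setup time and complete the original bio.
 */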
static void pblk_end_partial_read(struct nvm_rq *rqd)
{
        struct pblk *pblk = rqd->private;
        struct pblk_g_ctx *r_ctx = nvm_rq_to_pdu(rqd);
        struct pblk_pr_ctx *pr_ctx = r_ctx->private;
        struct bio *new_bio = rqd->bio;
        struct bio *bio = pr_ctx->orig_bio;
        struct bio_vec src_bv, dst_bv;
        struct pblk_sec_meta *meta_list = rqd->meta_list;
        int bio_init_idx = pr_ctx->bio_init_idx;
        unsigned long *read_bitmap = pr_ctx->bitmap;
        int nr_secs = pr_ctx->orig_nr_secs;
        int nr_holes = nr_secs - bitmap_weight(read_bitmap, nr_secs);
        __le64 *lba_list_mem, *lba_list_media;
        void *src_p, *dst_p;
        int hole, i;

        if (unlikely(nr_holes == 1)) {
                struct ppa_addr ppa;

                ppa = rqd->ppa_addr;
                rqd->ppa_list = pr_ctx->ppa_ptr;
                rqd->dma_ppa_list = pr_ctx->dma_ppa_list;
                rqd->ppa_list[0] = ppa;
        }

        /* Re-use allocated memory for intermediate lbas */
        lba_list_mem = (((void *)rqd->ppa_list) + pblk_dma_ppa_size);
        lba_list_media = (((void *)rqd->ppa_list) + 2 * pblk_dma_ppa_size);

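        /* Park the lbas reported by the device in lba_list_media and put
         * back the lbas snapshotted at setup time, so that meta_list again
         * matches the geometry of the original request. The media lbas are
         * folded back into the hole positions below.
         */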
        for (i = 0; i < nr_secs; i++) {
                lba_list_media[i] = meta_list[i].lba;
                meta_list[i].lba = lba_list_mem[i];
        }

        /* Fill the holes in the original bio */
        i = 0;
        hole = find_first_zero_bit(read_bitmap, nr_secs);
        do {
                struct pblk_line *line;

                line = pblk_ppa_to_line(pblk, rqd->ppa_list[i]);
                kref_put(&line->ref, pblk_line_put);

                meta_list[hole].lba = lba_list_media[i];

                src_bv = new_bio->bi_io_vec[i++];
                dst_bv = bio->bi_io_vec[bio_init_idx + hole];

                src_p = kmap_atomic(src_bv.bv_page);
                dst_p = kmap_atomic(dst_bv.bv_page);

                memcpy(dst_p + dst_bv.bv_offset,
                        src_p + src_bv.bv_offset,
                        PBLK_EXPOSED_PAGE_SIZE);

                kunmap_atomic(src_p);
                kunmap_atomic(dst_p);

                mempool_free(src_bv.bv_page, &pblk->page_bio_pool);

                hole = find_next_zero_bit(read_bitmap, nr_secs, hole + 1);
        } while (hole < nr_secs);

        bio_put(new_bio);
        kfree(pr_ctx);

        /* restore original request */
        rqd->bio = NULL;
        rqd->nr_ppas = nr_secs;

        bio_endio(bio);
        __pblk_end_io_read(pblk, rqd, false);
}

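/*
 * Prepare a partial read: allocate an internal bio with one page per hole,
 * snapshot the sector metadata gathered so far and save everything needed
 * to reassemble the original request in the partial-read context.
 */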
static int pblk_setup_partial_read(struct pblk *pblk, struct nvm_rq *rqd,
                            unsigned int bio_init_idx,
                            unsigned long *read_bitmap,
                            int nr_holes)
{
        struct pblk_sec_meta *meta_list = rqd->meta_list;
        struct pblk_g_ctx *r_ctx = nvm_rq_to_pdu(rqd);
        struct pblk_pr_ctx *pr_ctx;
        struct bio *new_bio, *bio = r_ctx->private;
        __le64 *lba_list_mem;
        int nr_secs = rqd->nr_ppas;
        int i;

        /* Re-use allocated memory for intermediate lbas */
        lba_list_mem = (((void *)rqd->ppa_list) + pblk_dma_ppa_size);

        new_bio = bio_alloc(GFP_KERNEL, nr_holes);

        if (pblk_bio_add_pages(pblk, new_bio, GFP_KERNEL, nr_holes))
                goto fail_bio_put;

        if (nr_holes != new_bio->bi_vcnt) {
                WARN_ONCE(1, "pblk: malformed bio\n");
                goto fail_free_pages;
        }

        pr_ctx = kmalloc(sizeof(struct pblk_pr_ctx), GFP_KERNEL);
        if (!pr_ctx)
                goto fail_free_pages;

        for (i = 0; i < nr_secs; i++)
                lba_list_mem[i] = meta_list[i].lba;

        new_bio->bi_iter.bi_sector = 0; /* internal bio */
        bio_set_op_attrs(new_bio, REQ_OP_READ, 0);

        rqd->bio = new_bio;
        rqd->nr_ppas = nr_holes;

        pr_ctx->ppa_ptr = NULL;
        pr_ctx->orig_bio = bio;
        bitmap_copy(pr_ctx->bitmap, read_bitmap, NVM_MAX_VLBA);
        pr_ctx->bio_init_idx = bio_init_idx;
        pr_ctx->orig_nr_secs = nr_secs;
        r_ctx->private = pr_ctx;

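        /* With a single hole, nvm_rq carries the ppa inline in ppa_addr
         * instead of using ppa_list. Stash the list pointers in pr_ctx so
         * that pblk_end_partial_read() can restore them.
         */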
        if (unlikely(nr_holes == 1)) {
                pr_ctx->ppa_ptr = rqd->ppa_list;
                pr_ctx->dma_ppa_list = rqd->dma_ppa_list;
                rqd->ppa_addr = rqd->ppa_list[0];
        }
        return 0;

fail_free_pages:
        pblk_bio_free_pages(pblk, new_bio, 0, new_bio->bi_vcnt);
fail_bio_put:
        bio_put(new_bio);

        return -ENOMEM;
}

static int pblk_partial_read_bio(struct pblk *pblk, struct nvm_rq *rqd,
                                 unsigned int bio_init_idx,
                                 unsigned long *read_bitmap, int nr_secs)
{
        int nr_holes;
        int ret;

        nr_holes = nr_secs - bitmap_weight(read_bitmap, nr_secs);

        if (pblk_setup_partial_read(pblk, rqd, bio_init_idx, read_bitmap,
                                    nr_holes))
                return NVM_IO_ERR;

        rqd->end_io = pblk_end_partial_read;

        ret = pblk_submit_io(pblk, rqd);
        if (ret) {
                bio_put(rqd->bio);
                pblk_err(pblk, "partial read IO submission failed\n");
                goto err;
        }

        return NVM_IO_OK;

err:
        pblk_err(pblk, "failed to perform partial read\n");

        /* Free allocated pages in new bio */
        pblk_bio_free_pages(pblk, rqd->bio, 0, rqd->bio->bi_vcnt);
        __pblk_end_io_read(pblk, rqd, false);
        return NVM_IO_ERR;
}

static void pblk_read_rq(struct pblk *pblk, struct nvm_rq *rqd, struct bio *bio,
                         sector_t lba, unsigned long *read_bitmap)
{
        struct pblk_sec_meta *meta_list = rqd->meta_list;
        struct ppa_addr ppa;

        pblk_lookup_l2p_seq(pblk, &ppa, lba, 1);

#ifdef CONFIG_NVM_PBLK_DEBUG
        atomic_long_inc(&pblk->inflight_reads);
#endif

retry:
        if (pblk_ppa_empty(ppa)) {
                WARN_ON(test_and_set_bit(0, read_bitmap));
                meta_list[0].lba = cpu_to_le64(ADDR_EMPTY);
                return;
        }

        /* Try to read from write buffer. The address is later checked on the
         * write buffer to prevent retrieving overwritten data.
         */
        if (pblk_addr_in_cache(ppa)) {
                if (!pblk_read_from_cache(pblk, bio, lba, ppa, 0, 1)) {
                        pblk_lookup_l2p_seq(pblk, &ppa, lba, 1);
                        goto retry;
                }

                WARN_ON(test_and_set_bit(0, read_bitmap));
                meta_list[0].lba = cpu_to_le64(lba);

#ifdef CONFIG_NVM_PBLK_DEBUG
                atomic_long_inc(&pblk->cache_reads);
#endif
        } else {
                rqd->ppa_addr = ppa;
        }
}

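/*
 * Entry point for user reads. Three outcomes are possible:
 *
 *   - every sector is resolved from the write buffer (or maps to an empty
 *     lba): the bio is completed immediately and NVM_IO_DONE is returned;
 *   - no sector hits the write buffer: the bio is cloned and the whole
 *     request is submitted to the device;
 *   - a mix of both: the partial read path reads the holes from media and
 *     stitches the result back into the original bio.
 */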
int pblk_submit_read(struct pblk *pblk, struct bio *bio)
{
        struct nvm_tgt_dev *dev = pblk->dev;
        struct request_queue *q = dev->q;
        sector_t blba = pblk_get_lba(bio);
        unsigned int nr_secs = pblk_get_secs(bio);
        struct pblk_g_ctx *r_ctx;
        struct nvm_rq *rqd;
        unsigned int bio_init_idx;
        DECLARE_BITMAP(read_bitmap, NVM_MAX_VLBA);
        int ret = NVM_IO_ERR;

        generic_start_io_acct(q, REQ_OP_READ, bio_sectors(bio),
                              &pblk->disk->part0);

        bitmap_zero(read_bitmap, nr_secs);

        rqd = pblk_alloc_rqd(pblk, PBLK_READ);

        rqd->opcode = NVM_OP_PREAD;
        rqd->nr_ppas = nr_secs;
        rqd->bio = NULL; /* cloned bio if needed */
        rqd->private = pblk;
        rqd->end_io = pblk_end_io_read;

        r_ctx = nvm_rq_to_pdu(rqd);
        r_ctx->start_time = jiffies;
        r_ctx->lba = blba;
        r_ctx->private = bio; /* original bio */

        /* Save the index for this bio's start. This is needed in case
         * we need to fill a partial read.
         */
        bio_init_idx = pblk_get_bi_idx(bio);

        if (pblk_alloc_rqd_meta(pblk, rqd))
                goto fail_rqd_free;

        if (nr_secs > 1)
                pblk_read_ppalist_rq(pblk, rqd, bio, blba, read_bitmap);
        else
                pblk_read_rq(pblk, rqd, bio, blba, read_bitmap);

        if (bitmap_full(read_bitmap, nr_secs)) {
                atomic_inc(&pblk->inflight_io);
                __pblk_end_io_read(pblk, rqd, false);
                return NVM_IO_DONE;
        }

        /* All sectors are to be read from the device */
        if (bitmap_empty(read_bitmap, rqd->nr_ppas)) {
                struct bio *int_bio = NULL;

                /* Clone read bio to deal with read errors internally */
                int_bio = bio_clone_fast(bio, GFP_KERNEL, &pblk_bio_set);
                if (!int_bio) {
                        pblk_err(pblk, "could not clone read bio\n");
                        goto fail_end_io;
                }

                rqd->bio = int_bio;

                if (pblk_submit_io(pblk, rqd)) {
                        pblk_err(pblk, "read IO submission failed\n");
                        ret = NVM_IO_ERR;
                        goto fail_end_io;
                }

                return NVM_IO_OK;
        }

        /* The read bio request could be partially filled by the write buffer,
         * but there are some holes that need to be read from the drive.
         */
        ret = pblk_partial_read_bio(pblk, rqd, bio_init_idx, read_bitmap,
                                    nr_secs);
        if (ret)
                goto fail_meta_free;

        return NVM_IO_OK;

fail_meta_free:
        nvm_dev_dma_free(dev->parent, rqd->meta_list, rqd->dma_meta_list);
fail_rqd_free:
        pblk_free_rqd(pblk, rqd, PBLK_READ);
        return ret;
fail_end_io:
        __pblk_end_io_read(pblk, rqd, false);
        return ret;
}

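/*
 * Build the ppa list for a multi-sector GC read. Each candidate sector is
 * revalidated against the current L2P mapping: if the lba has been
 * overwritten since GC selected it, the mapping no longer points at the GC
 * address and the sector is dropped by marking it ADDR_EMPTY.
 */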
static int read_ppalist_rq_gc(struct pblk *pblk, struct nvm_rq *rqd,
                              struct pblk_line *line, u64 *lba_list,
                              u64 *paddr_list_gc, unsigned int nr_secs)
{
        struct ppa_addr ppa_list_l2p[NVM_MAX_VLBA];
        struct ppa_addr ppa_gc;
        int valid_secs = 0;
        int i;

        pblk_lookup_l2p_rand(pblk, ppa_list_l2p, lba_list, nr_secs);

        for (i = 0; i < nr_secs; i++) {
                if (lba_list[i] == ADDR_EMPTY)
                        continue;

                ppa_gc = addr_to_gen_ppa(pblk, paddr_list_gc[i], line->id);
                if (!pblk_ppa_comp(ppa_list_l2p[i], ppa_gc)) {
                        paddr_list_gc[i] = lba_list[i] = ADDR_EMPTY;
                        continue;
                }

                rqd->ppa_list[valid_secs++] = ppa_list_l2p[i];
        }

#ifdef CONFIG_NVM_PBLK_DEBUG
        atomic_long_add(valid_secs, &pblk->inflight_reads);
#endif

        return valid_secs;
}

static int read_rq_gc(struct pblk *pblk, struct nvm_rq *rqd,
                      struct pblk_line *line, sector_t lba,
                      u64 paddr_gc)
{
        struct ppa_addr ppa_l2p, ppa_gc;
        int valid_secs = 0;

        if (lba == ADDR_EMPTY)
                goto out;

        /* logic error: lba out-of-bounds */
        if (lba >= pblk->rl.nr_secs) {
                WARN(1, "pblk: read lba out of bounds\n");
                goto out;
        }

        spin_lock(&pblk->trans_lock);
        ppa_l2p = pblk_trans_map_get(pblk, lba);
        spin_unlock(&pblk->trans_lock);

        ppa_gc = addr_to_gen_ppa(pblk, paddr_gc, line->id);
        if (!pblk_ppa_comp(ppa_l2p, ppa_gc))
                goto out;

        rqd->ppa_addr = ppa_l2p;
        valid_secs = 1;

#ifdef CONFIG_NVM_PBLK_DEBUG
        atomic_long_inc(&pblk->inflight_reads);
#endif

out:
        return valid_secs;
}

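/*
 * Synchronous GC read: read the still-valid sectors of a victim line into
 * gc_rq->data so they can be rewritten elsewhere. The request lives on the
 * stack and is submitted with pblk_submit_io_sync(), so no end_io callback
 * is involved.
 */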
int pblk_submit_read_gc(struct pblk *pblk, struct pblk_gc_rq *gc_rq)
{
        struct nvm_tgt_dev *dev = pblk->dev;
        struct nvm_geo *geo = &dev->geo;
        struct bio *bio;
        struct nvm_rq rqd;
        int data_len;
        int ret = NVM_IO_OK;

        memset(&rqd, 0, sizeof(struct nvm_rq));

        ret = pblk_alloc_rqd_meta(pblk, &rqd);
        if (ret)
                return ret;

        if (gc_rq->nr_secs > 1) {
                gc_rq->secs_to_gc = read_ppalist_rq_gc(pblk, &rqd, gc_rq->line,
                                                        gc_rq->lba_list,
                                                        gc_rq->paddr_list,
                                                        gc_rq->nr_secs);
                if (gc_rq->secs_to_gc == 1)
                        rqd.ppa_addr = rqd.ppa_list[0];
        } else {
                gc_rq->secs_to_gc = read_rq_gc(pblk, &rqd, gc_rq->line,
                                                        gc_rq->lba_list[0],
                                                        gc_rq->paddr_list[0]);
        }

        if (!(gc_rq->secs_to_gc))
                goto out;

        data_len = (gc_rq->secs_to_gc) * geo->csecs;
        bio = pblk_bio_map_addr(pblk, gc_rq->data, gc_rq->secs_to_gc, data_len,
                                                PBLK_VMALLOC_META, GFP_KERNEL);
        if (IS_ERR(bio)) {
                pblk_err(pblk, "could not allocate GC bio (%lu)\n",
                                                                PTR_ERR(bio));
                ret = PTR_ERR(bio);
                goto err_free_dma;
        }

        bio->bi_iter.bi_sector = 0; /* internal bio */
        bio_set_op_attrs(bio, REQ_OP_READ, 0);

        rqd.opcode = NVM_OP_PREAD;
        rqd.nr_ppas = gc_rq->secs_to_gc;
        rqd.bio = bio;

        if (pblk_submit_io_sync(pblk, &rqd)) {
                ret = -EIO;
                pblk_err(pblk, "GC read request failed\n");
                goto err_free_bio;
        }

        pblk_read_check_rand(pblk, &rqd, gc_rq->lba_list, gc_rq->nr_secs);

        atomic_dec(&pblk->inflight_io);

        if (rqd.error) {
                atomic_long_inc(&pblk->read_failed_gc);
#ifdef CONFIG_NVM_PBLK_DEBUG
                pblk_print_failed_rqd(pblk, &rqd, rqd.error);
#endif
        }

#ifdef CONFIG_NVM_PBLK_DEBUG
        atomic_long_add(gc_rq->secs_to_gc, &pblk->sync_reads);
        atomic_long_add(gc_rq->secs_to_gc, &pblk->recov_gc_reads);
        atomic_long_sub(gc_rq->secs_to_gc, &pblk->inflight_reads);
#endif

out:
        pblk_free_rqd_meta(pblk, &rqd);
        return ret;

err_free_bio:
        bio_put(bio);
err_free_dma:
        pblk_free_rqd_meta(pblk, &rqd);
        return ret;
}