lightnvm: pblk: guarantee line integrity on reads
[linux-block.git] drivers/lightnvm/pblk-read.c
/*
 * Copyright (C) 2016 CNEX Labs
 * Initial release: Javier Gonzalez <javier@cnexlabs.com>
 *                  Matias Bjorling <matias@cnexlabs.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * pblk-read.c - pblk's read path
 */

#include "pblk.h"

/*
 * There is no guarantee that the value read from the cache has not been
 * updated and now resides at another location in the cache. We do guarantee,
 * though, that if the value is read from the cache, it belongs to the mapped
 * lba. To guarantee that writes and reads are ordered, a flush must be
 * issued.
 */
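
/*
 * Illustrative example of the guarantee above (hypothetical lbas): after a
 * write to lba 0 without a flush, a read of lba 0 may return either the new
 * or the old contents, but never data belonging to a different lba. A flush
 * issued between the two forces the write out of the buffer, so the read is
 * guaranteed to observe it.
 */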
static int pblk_read_from_cache(struct pblk *pblk, struct bio *bio,
                                sector_t lba, struct ppa_addr ppa,
                                int bio_iter, bool advanced_bio)
{
#ifdef CONFIG_NVM_DEBUG
        /* Callers must ensure that the ppa points to a cache address */
        BUG_ON(pblk_ppa_empty(ppa));
        BUG_ON(!pblk_addr_in_cache(ppa));
#endif

        return pblk_rb_copy_to_bio(&pblk->rwb, bio, lba, ppa,
                                   bio_iter, advanced_bio);
}
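
/*
 * Retry contract (as used by the callers below): pblk_rb_copy_to_bio()
 * fails when the cache entry for the lba has been overwritten or evicted
 * between the L2P lookup and the copy; callers then redo the lookup and
 * try again, which is what the "goto retry" loops implement.
 */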

static void pblk_read_ppalist_rq(struct pblk *pblk, struct nvm_rq *rqd,
                                 sector_t blba, unsigned long *read_bitmap)
{
        struct pblk_sec_meta *meta_list = rqd->meta_list;
        struct bio *bio = rqd->bio;
        struct ppa_addr ppas[PBLK_MAX_REQ_ADDRS];
        int nr_secs = rqd->nr_ppas;
        bool advanced_bio = false;
        int i, j = 0;

        pblk_lookup_l2p_seq(pblk, ppas, blba, nr_secs);

        for (i = 0; i < nr_secs; i++) {
                struct ppa_addr p = ppas[i];
                sector_t lba = blba + i;

retry:
                if (pblk_ppa_empty(p)) {
                        WARN_ON(test_and_set_bit(i, read_bitmap));
                        meta_list[i].lba = cpu_to_le64(ADDR_EMPTY);

                        if (unlikely(!advanced_bio)) {
                                bio_advance(bio, i * PBLK_EXPOSED_PAGE_SIZE);
                                advanced_bio = true;
                        }

                        goto next;
                }

                /* Try to read from write buffer. The address is later checked
                 * on the write buffer to prevent retrieving overwritten data.
                 */
                if (pblk_addr_in_cache(p)) {
                        if (!pblk_read_from_cache(pblk, bio, lba, p, i,
                                                  advanced_bio)) {
                                pblk_lookup_l2p_seq(pblk, &p, lba, 1);
                                goto retry;
                        }
                        WARN_ON(test_and_set_bit(i, read_bitmap));
                        meta_list[i].lba = cpu_to_le64(lba);
                        advanced_bio = true;
#ifdef CONFIG_NVM_DEBUG
                        atomic_long_inc(&pblk->cache_reads);
#endif
                } else {
                        /* Read from media non-cached sectors */
                        rqd->ppa_list[j++] = p;
                }

next:
                if (advanced_bio)
                        bio_advance(bio, PBLK_EXPOSED_PAGE_SIZE);
        }

        if (pblk_io_aligned(pblk, nr_secs))
                rqd->flags = pblk_set_read_mode(pblk, PBLK_READ_SEQUENTIAL);
        else
                rqd->flags = pblk_set_read_mode(pblk, PBLK_READ_RANDOM);

#ifdef CONFIG_NVM_DEBUG
        atomic_long_add(nr_secs, &pblk->inflight_reads);
#endif
}
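
/*
 * Example of the state built above (hypothetical four-sector request):
 * if sectors 0 and 2 are served from the write buffer, read_bitmap ends
 * up as 0b0101, rqd->ppa_list holds only the media addresses of sectors
 * 1 and 3, and the holes are merged back into the user bio later by
 * pblk_fill_partial_read_bio().
 */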

static int pblk_submit_read_io(struct pblk *pblk, struct nvm_rq *rqd)
{
        int err;

        err = pblk_submit_io(pblk, rqd);
        if (err)
                return NVM_IO_ERR;

        return NVM_IO_OK;
}

static void pblk_read_check(struct pblk *pblk, struct nvm_rq *rqd,
                            sector_t blba)
{
        struct pblk_sec_meta *meta_list = rqd->meta_list;
        int nr_lbas = rqd->nr_ppas;
        int i;

        for (i = 0; i < nr_lbas; i++) {
                u64 lba = le64_to_cpu(meta_list[i].lba);

                if (lba == ADDR_EMPTY)
                        continue;

                WARN(lba != blba + i, "pblk: corrupted read LBA\n");
        }
}
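
/*
 * The lba check works because pblk stores each sector's lba in the
 * per-sector metadata region at write time; a mismatch on completion
 * therefore indicates a misdirected read rather than a bad caller.
 */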

static void pblk_read_put_rqd_kref(struct pblk *pblk, struct nvm_rq *rqd)
{
        struct ppa_addr *ppa_list;
        int i;

        ppa_list = (rqd->nr_ppas > 1) ? rqd->ppa_list : &rqd->ppa_addr;

        for (i = 0; i < rqd->nr_ppas; i++) {
                struct ppa_addr ppa = ppa_list[i];
                struct pblk_line *line;

                line = &pblk->lines[pblk_dev_ppa_to_line(ppa)];
                kref_put(&line->ref, pblk_line_put_wq);
        }
}
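
/*
 * This is one half of the line-integrity guarantee from the patch subject:
 * each device sector in the request holds a reference on its line, taken
 * when the L2P table is consulted, so the line cannot be erased or recycled
 * underneath an in-flight read. The references are dropped here on
 * completion.
 */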

static void pblk_end_user_read(struct bio *bio)
{
#ifdef CONFIG_NVM_DEBUG
        WARN_ONCE(bio->bi_status, "pblk: corrupted read bio\n");
#endif
        bio_endio(bio);
        bio_put(bio);
}

static void __pblk_end_io_read(struct pblk *pblk, struct nvm_rq *rqd,
                               bool put_line)
{
        struct pblk_g_ctx *r_ctx = nvm_rq_to_pdu(rqd);
        struct bio *bio = rqd->bio;

        if (rqd->error)
                pblk_log_read_err(pblk, rqd);
#ifdef CONFIG_NVM_DEBUG
        else
                WARN_ONCE(bio->bi_status, "pblk: corrupted read error\n");
#endif

        pblk_read_check(pblk, rqd, r_ctx->lba);

        bio_put(bio);
        if (r_ctx->private)
                pblk_end_user_read((struct bio *)r_ctx->private);

        if (put_line)
                pblk_read_put_rqd_kref(pblk, rqd);

#ifdef CONFIG_NVM_DEBUG
        atomic_long_add(rqd->nr_ppas, &pblk->sync_reads);
        atomic_long_sub(rqd->nr_ppas, &pblk->inflight_reads);
#endif

        pblk_free_rqd(pblk, rqd, PBLK_READ);
        atomic_dec(&pblk->inflight_io);
}
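
/*
 * put_line distinguishes the completion paths: the regular end_io path
 * drops the per-line references here, while the partial-read and
 * fully-cached paths pass false because their references were already
 * dropped sector by sector or were never taken.
 */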

static void pblk_end_io_read(struct nvm_rq *rqd)
{
        struct pblk *pblk = rqd->private;

        __pblk_end_io_read(pblk, rqd, true);
}

static int pblk_fill_partial_read_bio(struct pblk *pblk, struct nvm_rq *rqd,
                                      unsigned int bio_init_idx,
                                      unsigned long *read_bitmap)
{
        struct bio *new_bio, *bio = rqd->bio;
        struct pblk_sec_meta *meta_list = rqd->meta_list;
        struct bio_vec src_bv, dst_bv;
        void *ppa_ptr = NULL;
        void *src_p, *dst_p;
        dma_addr_t dma_ppa_list = 0;
        __le64 *lba_list_mem, *lba_list_media;
        int nr_secs = rqd->nr_ppas;
        int nr_holes = nr_secs - bitmap_weight(read_bitmap, nr_secs);
        int i, ret, hole;
        DECLARE_COMPLETION_ONSTACK(wait);

        /* Re-use allocated memory for intermediate lbas */
        lba_list_mem = (((void *)rqd->ppa_list) + pblk_dma_ppa_size);
        lba_list_media = (((void *)rqd->ppa_list) + 2 * pblk_dma_ppa_size);

        new_bio = bio_alloc(GFP_KERNEL, nr_holes);

        if (pblk_bio_add_pages(pblk, new_bio, GFP_KERNEL, nr_holes))
                goto err;

        if (nr_holes != new_bio->bi_vcnt) {
                pr_err("pblk: malformed bio\n");
                goto err;
        }

        for (i = 0; i < nr_secs; i++)
                lba_list_mem[i] = meta_list[i].lba;

        new_bio->bi_iter.bi_sector = 0; /* internal bio */
        bio_set_op_attrs(new_bio, REQ_OP_READ, 0);

        rqd->bio = new_bio;
        rqd->nr_ppas = nr_holes;
        rqd->flags = pblk_set_read_mode(pblk, PBLK_READ_RANDOM);
        rqd->end_io = pblk_end_io_sync;
        rqd->private = &wait;

        if (unlikely(nr_secs > 1 && nr_holes == 1)) {
                ppa_ptr = rqd->ppa_list;
                dma_ppa_list = rqd->dma_ppa_list;
                rqd->ppa_addr = rqd->ppa_list[0];
        }

        ret = pblk_submit_read_io(pblk, rqd);
        if (ret) {
                bio_put(rqd->bio);
                pr_err("pblk: read IO submission failed\n");
                goto err;
        }

        if (!wait_for_completion_io_timeout(&wait,
                                msecs_to_jiffies(PBLK_COMMAND_TIMEOUT_MS))) {
                pr_err("pblk: partial read I/O timed out\n");
        }

        if (rqd->error) {
                atomic_long_inc(&pblk->read_failed);
#ifdef CONFIG_NVM_DEBUG
                pblk_print_failed_rqd(pblk, rqd, rqd->error);
#endif
        }

        if (unlikely(nr_secs > 1 && nr_holes == 1)) {
                struct ppa_addr ppa;

                ppa = rqd->ppa_addr;
                rqd->ppa_list = ppa_ptr;
                rqd->dma_ppa_list = dma_ppa_list;
                rqd->ppa_list[0] = ppa;
        }

        for (i = 0; i < nr_secs; i++) {
                lba_list_media[i] = meta_list[i].lba;
                meta_list[i].lba = lba_list_mem[i];
        }

        /* Fill the holes in the original bio */
        i = 0;
        hole = find_first_zero_bit(read_bitmap, nr_secs);
        do {
                int line_id = pblk_dev_ppa_to_line(rqd->ppa_list[i]);
                struct pblk_line *line = &pblk->lines[line_id];

                kref_put(&line->ref, pblk_line_put);

                meta_list[hole].lba = lba_list_media[i];

                src_bv = new_bio->bi_io_vec[i++];
                dst_bv = bio->bi_io_vec[bio_init_idx + hole];

                src_p = kmap_atomic(src_bv.bv_page);
                dst_p = kmap_atomic(dst_bv.bv_page);

                memcpy(dst_p + dst_bv.bv_offset,
                       src_p + src_bv.bv_offset,
                       PBLK_EXPOSED_PAGE_SIZE);

                kunmap_atomic(src_p);
                kunmap_atomic(dst_p);

                mempool_free(src_bv.bv_page, pblk->page_bio_pool);

                hole = find_next_zero_bit(read_bitmap, nr_secs, hole + 1);
        } while (hole < nr_secs);

        bio_put(new_bio);

        /* Complete the original bio and associated request */
        bio_endio(bio);
        rqd->bio = bio;
        rqd->nr_ppas = nr_secs;

        __pblk_end_io_read(pblk, rqd, false);
        return NVM_IO_OK;

err:
        /* Free allocated pages in new bio */
        pblk_bio_free_pages(pblk, new_bio, 0, new_bio->bi_vcnt);
        __pblk_end_io_read(pblk, rqd, false);
        return NVM_IO_ERR;
}
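
/*
 * Worked example for the hole filling above (hypothetical request): with
 * read_bitmap = 0b0101 for nr_secs = 4, sectors 1 and 3 are holes. new_bio
 * carries the two sectors read from media; the loop walks the zero bits,
 * copies new_bio->bi_io_vec[0] into the original bio's vec at
 * bio_init_idx + 1 and new_bio->bi_io_vec[1] into bio_init_idx + 3, and
 * drops the line reference taken for each media sector.
 */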

static void pblk_read_rq(struct pblk *pblk, struct nvm_rq *rqd,
                         sector_t lba, unsigned long *read_bitmap)
{
        struct pblk_sec_meta *meta_list = rqd->meta_list;
        struct bio *bio = rqd->bio;
        struct ppa_addr ppa;

        pblk_lookup_l2p_seq(pblk, &ppa, lba, 1);

#ifdef CONFIG_NVM_DEBUG
        atomic_long_inc(&pblk->inflight_reads);
#endif

retry:
        if (pblk_ppa_empty(ppa)) {
                WARN_ON(test_and_set_bit(0, read_bitmap));
                meta_list[0].lba = cpu_to_le64(ADDR_EMPTY);
                return;
        }

        /* Try to read from write buffer. The address is later checked on the
         * write buffer to prevent retrieving overwritten data.
         */
        if (pblk_addr_in_cache(ppa)) {
                if (!pblk_read_from_cache(pblk, bio, lba, ppa, 0, 1)) {
                        pblk_lookup_l2p_seq(pblk, &ppa, lba, 1);
                        goto retry;
                }

                WARN_ON(test_and_set_bit(0, read_bitmap));
                meta_list[0].lba = cpu_to_le64(lba);

#ifdef CONFIG_NVM_DEBUG
                atomic_long_inc(&pblk->cache_reads);
#endif
        } else {
                rqd->ppa_addr = ppa;
        }

        rqd->flags = pblk_set_read_mode(pblk, PBLK_READ_RANDOM);
}
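
/*
 * Single-sector counterpart of pblk_read_ppalist_rq(): the same
 * empty/cached/media decision, but the device address goes into
 * rqd->ppa_addr rather than a ppa_list, and the bio needs no per-sector
 * advancing.
 */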

int pblk_submit_read(struct pblk *pblk, struct bio *bio)
{
        struct nvm_tgt_dev *dev = pblk->dev;
        sector_t blba = pblk_get_lba(bio);
        unsigned int nr_secs = pblk_get_secs(bio);
        struct pblk_g_ctx *r_ctx;
        struct nvm_rq *rqd;
        unsigned int bio_init_idx;
        unsigned long read_bitmap; /* Max 64 ppas per request */
        int ret = NVM_IO_ERR;

        /* logic error: lba out-of-bounds. Ignore read request */
        if (blba >= pblk->rl.nr_secs || nr_secs > PBLK_MAX_REQ_ADDRS) {
                WARN(1, "pblk: read lba out of bounds (lba:%llu, nr:%d)\n",
                                        (unsigned long long)blba, nr_secs);
                return NVM_IO_ERR;
        }

        bitmap_zero(&read_bitmap, nr_secs);

        rqd = pblk_alloc_rqd(pblk, PBLK_READ);

        rqd->opcode = NVM_OP_PREAD;
        rqd->bio = bio;
        rqd->nr_ppas = nr_secs;
        rqd->private = pblk;
        rqd->end_io = pblk_end_io_read;

        r_ctx = nvm_rq_to_pdu(rqd);
        r_ctx->lba = blba;

        /* Save the index for this bio's start. This is needed in case
         * we need to fill a partial read.
         */
        bio_init_idx = pblk_get_bi_idx(bio);

        rqd->meta_list = nvm_dev_dma_alloc(dev->parent, GFP_KERNEL,
                                           &rqd->dma_meta_list);
        if (!rqd->meta_list) {
                pr_err("pblk: not able to allocate ppa list\n");
                goto fail_rqd_free;
        }

        if (nr_secs > 1) {
                rqd->ppa_list = rqd->meta_list + pblk_dma_meta_size;
                rqd->dma_ppa_list = rqd->dma_meta_list + pblk_dma_meta_size;

                pblk_read_ppalist_rq(pblk, rqd, blba, &read_bitmap);
        } else {
                pblk_read_rq(pblk, rqd, blba, &read_bitmap);
        }

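        /*
         * A full bitmap means every sector was satisfied from the write
         * buffer (or was unmapped), so the user bio can be completed without
         * touching the device; put_line is false because no line references
         * were taken for cached sectors.
         */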
        bio_get(bio);
        if (bitmap_full(&read_bitmap, nr_secs)) {
                bio_endio(bio);
                atomic_inc(&pblk->inflight_io);
                __pblk_end_io_read(pblk, rqd, false);
                return NVM_IO_OK;
        }

        /* All sectors are to be read from the device */
        if (bitmap_empty(&read_bitmap, rqd->nr_ppas)) {
                struct bio *int_bio = NULL;

                /* Clone read bio to deal with read errors internally */
                int_bio = bio_clone_fast(bio, GFP_KERNEL, pblk_bio_set);
                if (!int_bio) {
                        pr_err("pblk: could not clone read bio\n");
                        return NVM_IO_ERR;
                }

                rqd->bio = int_bio;
                r_ctx->private = bio;
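                /*
                 * From here on the cloned int_bio travels to the device
                 * while the user bio is parked in r_ctx->private; it is
                 * completed by pblk_end_user_read() when the request ends.
                 */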

                ret = pblk_submit_read_io(pblk, rqd);
                if (ret) {
                        pr_err("pblk: read IO submission failed\n");
                        if (int_bio)
                                bio_put(int_bio);
                        return ret;
                }

                return NVM_IO_OK;
        }

        /* The read bio request could be partially filled by the write buffer,
         * but there are some holes that need to be read from the drive.
         */
        ret = pblk_fill_partial_read_bio(pblk, rqd, bio_init_idx, &read_bitmap);
        if (ret) {
                pr_err("pblk: failed to perform partial read\n");
                return ret;
        }

        return NVM_IO_OK;

fail_rqd_free:
        pblk_free_rqd(pblk, rqd, PBLK_READ);
        return ret;
}

static int read_ppalist_rq_gc(struct pblk *pblk, struct nvm_rq *rqd,
                              struct pblk_line *line, u64 *lba_list,
                              u64 *paddr_list_gc, unsigned int nr_secs)
{
        struct ppa_addr ppa_list_l2p[PBLK_MAX_REQ_ADDRS];
        struct ppa_addr ppa_gc;
        int valid_secs = 0;
        int i;

        pblk_lookup_l2p_rand(pblk, ppa_list_l2p, lba_list, nr_secs);

        for (i = 0; i < nr_secs; i++) {
                if (lba_list[i] == ADDR_EMPTY)
                        continue;

                ppa_gc = addr_to_gen_ppa(pblk, paddr_list_gc[i], line->id);
                if (!pblk_ppa_comp(ppa_list_l2p[i], ppa_gc)) {
                        paddr_list_gc[i] = lba_list[i] = ADDR_EMPTY;
                        continue;
                }

                rqd->ppa_list[valid_secs++] = ppa_list_l2p[i];
        }

#ifdef CONFIG_NVM_DEBUG
        atomic_long_add(valid_secs, &pblk->inflight_reads);
#endif

        return valid_secs;
}
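
/*
 * The ppa comparison above is what protects GC against concurrent user
 * writes (illustrative scenario): if an lba is rewritten after GC selected
 * it from the victim line, its L2P entry no longer matches the line-local
 * address computed by GC, and the now-stale sector is dropped from the
 * request by marking it ADDR_EMPTY.
 */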

static int read_rq_gc(struct pblk *pblk, struct nvm_rq *rqd,
                      struct pblk_line *line, sector_t lba,
                      u64 paddr_gc)
{
        struct ppa_addr ppa_l2p, ppa_gc;
        int valid_secs = 0;

        if (lba == ADDR_EMPTY)
                goto out;

        /* logic error: lba out-of-bounds */
        if (lba >= pblk->rl.nr_secs) {
                WARN(1, "pblk: read lba out of bounds\n");
                goto out;
        }

        spin_lock(&pblk->trans_lock);
        ppa_l2p = pblk_trans_map_get(pblk, lba);
        spin_unlock(&pblk->trans_lock);

        ppa_gc = addr_to_gen_ppa(pblk, paddr_gc, line->id);
        if (!pblk_ppa_comp(ppa_l2p, ppa_gc))
                goto out;

        rqd->ppa_addr = ppa_l2p;
        valid_secs = 1;

#ifdef CONFIG_NVM_DEBUG
        atomic_long_inc(&pblk->inflight_reads);
#endif

out:
        return valid_secs;
}

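/*
 * Read the valid sectors of a victim line into gc_rq->data. The request is
 * synchronous: completion runs pblk_end_io_sync(), which signals the
 * on-stack completion that this function waits on before returning.
 */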
int pblk_submit_read_gc(struct pblk *pblk, struct pblk_gc_rq *gc_rq)
{
        struct nvm_tgt_dev *dev = pblk->dev;
        struct nvm_geo *geo = &dev->geo;
        struct bio *bio;
        struct nvm_rq rqd;
        int data_len;
        int ret = NVM_IO_OK;
        DECLARE_COMPLETION_ONSTACK(wait);

        memset(&rqd, 0, sizeof(struct nvm_rq));

        rqd.meta_list = nvm_dev_dma_alloc(dev->parent, GFP_KERNEL,
                                          &rqd.dma_meta_list);
        if (!rqd.meta_list)
                return -ENOMEM;

        if (gc_rq->nr_secs > 1) {
                rqd.ppa_list = rqd.meta_list + pblk_dma_meta_size;
                rqd.dma_ppa_list = rqd.dma_meta_list + pblk_dma_meta_size;

                gc_rq->secs_to_gc = read_ppalist_rq_gc(pblk, &rqd, gc_rq->line,
                                                       gc_rq->lba_list,
                                                       gc_rq->paddr_list,
                                                       gc_rq->nr_secs);
                if (gc_rq->secs_to_gc == 1)
                        rqd.ppa_addr = rqd.ppa_list[0];
        } else {
                gc_rq->secs_to_gc = read_rq_gc(pblk, &rqd, gc_rq->line,
                                               gc_rq->lba_list[0],
                                               gc_rq->paddr_list[0]);
        }

        if (!(gc_rq->secs_to_gc))
                goto out;

        data_len = (gc_rq->secs_to_gc) * geo->sec_size;
        bio = pblk_bio_map_addr(pblk, gc_rq->data, gc_rq->secs_to_gc, data_len,
                                PBLK_VMALLOC_META, GFP_KERNEL);
        if (IS_ERR(bio)) {
                pr_err("pblk: could not allocate GC bio (%lu)\n", PTR_ERR(bio));
                ret = PTR_ERR(bio);
                goto err_free_dma;
        }

        bio->bi_iter.bi_sector = 0; /* internal bio */
        bio_set_op_attrs(bio, REQ_OP_READ, 0);

        rqd.opcode = NVM_OP_PREAD;
        rqd.end_io = pblk_end_io_sync;
        rqd.private = &wait;
        rqd.nr_ppas = gc_rq->secs_to_gc;
        rqd.flags = pblk_set_read_mode(pblk, PBLK_READ_RANDOM);
        rqd.bio = bio;

        if (pblk_submit_read_io(pblk, &rqd)) {
                ret = -EIO;
                pr_err("pblk: GC read request failed\n");
                goto err_free_bio;
        }

        if (!wait_for_completion_io_timeout(&wait,
                                msecs_to_jiffies(PBLK_COMMAND_TIMEOUT_MS))) {
                pr_err("pblk: GC read I/O timed out\n");
        }
        atomic_dec(&pblk->inflight_io);

        if (rqd.error) {
                atomic_long_inc(&pblk->read_failed_gc);
#ifdef CONFIG_NVM_DEBUG
                pblk_print_failed_rqd(pblk, &rqd, rqd.error);
#endif
        }

#ifdef CONFIG_NVM_DEBUG
        atomic_long_add(gc_rq->secs_to_gc, &pblk->sync_reads);
        atomic_long_add(gc_rq->secs_to_gc, &pblk->recov_gc_reads);
        atomic_long_sub(gc_rq->secs_to_gc, &pblk->inflight_reads);
#endif

out:
        nvm_dev_dma_free(dev->parent, rqd.meta_list, rqd.dma_meta_list);
        return ret;

err_free_bio:
        bio_put(bio);
err_free_dma:
        nvm_dev_dma_free(dev->parent, rqd.meta_list, rqd.dma_meta_list);
        return ret;
}