lightnvm: pblk: check read lba on gc path
drivers/lightnvm/pblk-read.c

/*
 * Copyright (C) 2016 CNEX Labs
 * Initial release: Javier Gonzalez <javier@cnexlabs.com>
 *                  Matias Bjorling <matias@cnexlabs.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * pblk-read.c - pblk's read path
 */

#include "pblk.h"

/*
 * There is no guarantee that the value read from cache has not been updated and
 * resides at another location in the cache. We guarantee though that if the
 * value is read from the cache, it belongs to the mapped lba. In order to
 * guarantee that writes and reads are ordered, a flush must be issued.
 */
static int pblk_read_from_cache(struct pblk *pblk, struct bio *bio,
				sector_t lba, struct ppa_addr ppa,
				int bio_iter, bool advanced_bio)
{
#ifdef CONFIG_NVM_DEBUG
	/* Callers must ensure that the ppa points to a cache address */
	BUG_ON(pblk_ppa_empty(ppa));
	BUG_ON(!pblk_addr_in_cache(ppa));
#endif

	return pblk_rb_copy_to_bio(&pblk->rwb, bio, lba, ppa,
						bio_iter, advanced_bio);
}

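/*
 * Map a multi-sector read: look up each lba in the L2P table and either
 * serve the sector from the write buffer (marking it in read_bitmap) or
 * add its ppa to the request for a device read. Empty mappings are
 * reported as ADDR_EMPTY in the sector metadata.
 */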
static void pblk_read_ppalist_rq(struct pblk *pblk, struct nvm_rq *rqd,
				 sector_t blba, unsigned long *read_bitmap)
{
	struct pblk_sec_meta *meta_list = rqd->meta_list;
	struct bio *bio = rqd->bio;
	struct ppa_addr ppas[PBLK_MAX_REQ_ADDRS];
	int nr_secs = rqd->nr_ppas;
	bool advanced_bio = false;
	int i, j = 0;

	pblk_lookup_l2p_seq(pblk, ppas, blba, nr_secs);

	for (i = 0; i < nr_secs; i++) {
		struct ppa_addr p = ppas[i];
		sector_t lba = blba + i;

retry:
		if (pblk_ppa_empty(p)) {
			WARN_ON(test_and_set_bit(i, read_bitmap));
			meta_list[i].lba = cpu_to_le64(ADDR_EMPTY);

			if (unlikely(!advanced_bio)) {
				bio_advance(bio, (i) * PBLK_EXPOSED_PAGE_SIZE);
				advanced_bio = true;
			}

			goto next;
		}

		/* Try to read from write buffer. The address is later checked
		 * on the write buffer to prevent retrieving overwritten data.
		 */
		if (pblk_addr_in_cache(p)) {
			if (!pblk_read_from_cache(pblk, bio, lba, p, i,
								advanced_bio)) {
				pblk_lookup_l2p_seq(pblk, &p, lba, 1);
				goto retry;
			}
			WARN_ON(test_and_set_bit(i, read_bitmap));
			meta_list[i].lba = cpu_to_le64(lba);
			advanced_bio = true;
#ifdef CONFIG_NVM_DEBUG
			atomic_long_inc(&pblk->cache_reads);
#endif
		} else {
			/* Read from media non-cached sectors */
			rqd->ppa_list[j++] = p;
		}

next:
		if (advanced_bio)
			bio_advance(bio, PBLK_EXPOSED_PAGE_SIZE);
	}

	if (pblk_io_aligned(pblk, nr_secs))
		rqd->flags = pblk_set_read_mode(pblk, PBLK_READ_SEQUENTIAL);
	else
		rqd->flags = pblk_set_read_mode(pblk, PBLK_READ_RANDOM);

#ifdef CONFIG_NVM_DEBUG
	atomic_long_add(nr_secs, &pblk->inflight_reads);
#endif
}

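/* Thin wrapper mapping pblk_submit_io() errors to the NVM_IO_* convention */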
static int pblk_submit_read_io(struct pblk *pblk, struct nvm_rq *rqd)
{
	int err;

	err = pblk_submit_io(pblk, rqd);
	if (err)
		return NVM_IO_ERR;

	return NVM_IO_OK;
}

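/*
 * Sanity check a sequential read: the lba stored in each sector's
 * out-of-band metadata must match the lba it was mapped to (blba + i).
 * ADDR_EMPTY entries correspond to unmapped sectors and are skipped.
 */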
static void pblk_read_check_seq(struct pblk *pblk, void *meta_list,
				sector_t blba, int nr_lbas)
{
	struct pblk_sec_meta *meta_lba_list = meta_list;
	int i;

	for (i = 0; i < nr_lbas; i++) {
		u64 lba = le64_to_cpu(meta_lba_list[i].lba);

		if (lba == ADDR_EMPTY)
			continue;

		WARN(lba != blba + i, "pblk: corrupted read LBA\n");
	}
}

/*
 * There can be holes in the lba list: entries marked ADDR_EMPTY are
 * skipped. The sector metadata, however, is packed, so it is only
 * advanced for mapped lbas.
 */
static void pblk_read_check_rand(struct pblk *pblk, void *meta_list,
				u64 *lba_list, int nr_lbas)
{
	struct pblk_sec_meta *meta_lba_list = meta_list;
	int i, j;

	for (i = 0, j = 0; i < nr_lbas; i++) {
		u64 lba = lba_list[i];
		u64 meta_lba;

		if (lba == ADDR_EMPTY)
			continue;

		meta_lba = le64_to_cpu(meta_lba_list[j++].lba);

		if (lba != meta_lba) {
			pr_err("pblk: corrupted read LBA (%llu/%llu)\n",
								lba, meta_lba);
			WARN_ON(1);
		}
	}
}

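/*
 * Drop the line references taken when the read was mapped. The last put
 * on a line schedules its cleanup through the workqueue variant.
 */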
static void pblk_read_put_rqd_kref(struct pblk *pblk, struct nvm_rq *rqd)
{
	struct ppa_addr *ppa_list;
	int i;

	ppa_list = (rqd->nr_ppas > 1) ? rqd->ppa_list : &rqd->ppa_addr;

	for (i = 0; i < rqd->nr_ppas; i++) {
		struct ppa_addr ppa = ppa_list[i];
		struct pblk_line *line;

		line = &pblk->lines[pblk_ppa_to_line(ppa)];
		kref_put(&line->ref, pblk_line_put_wq);
	}
}

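/* Complete the user bio; the internal clone is put in __pblk_end_io_read() */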
static void pblk_end_user_read(struct bio *bio)
{
#ifdef CONFIG_NVM_DEBUG
	WARN_ONCE(bio->bi_status, "pblk: corrupted read bio\n");
#endif
	bio_endio(bio);
	bio_put(bio);
}

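/*
 * Common read completion: account the I/O, verify the returned lbas and
 * free the request. @put_line is false on paths that have already dropped
 * (or never took) the per-line references, e.g. the partial read path.
 */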
static void __pblk_end_io_read(struct pblk *pblk, struct nvm_rq *rqd,
			       bool put_line)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct pblk_g_ctx *r_ctx = nvm_rq_to_pdu(rqd);
	struct bio *bio = rqd->bio;
	unsigned long start_time = r_ctx->start_time;

	generic_end_io_acct(dev->q, READ, &pblk->disk->part0, start_time);

	if (rqd->error)
		pblk_log_read_err(pblk, rqd);
#ifdef CONFIG_NVM_DEBUG
	else
		WARN_ONCE(bio->bi_status, "pblk: corrupted read error\n");
#endif

	pblk_read_check_seq(pblk, rqd->meta_list, r_ctx->lba, rqd->nr_ppas);

	bio_put(bio);
	if (r_ctx->private)
		pblk_end_user_read((struct bio *)r_ctx->private);

	if (put_line)
		pblk_read_put_rqd_kref(pblk, rqd);

#ifdef CONFIG_NVM_DEBUG
	atomic_long_add(rqd->nr_ppas, &pblk->sync_reads);
	atomic_long_sub(rqd->nr_ppas, &pblk->inflight_reads);
#endif

	pblk_free_rqd(pblk, rqd, PBLK_READ);
	atomic_dec(&pblk->inflight_io);
}

static void pblk_end_io_read(struct nvm_rq *rqd)
{
	struct pblk *pblk = rqd->private;

	__pblk_end_io_read(pblk, rqd, true);
}

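/*
 * Partial read: some sectors were served from the write buffer, so only
 * the holes in read_bitmap are read from the device. A new bio is built
 * for the holes, submitted synchronously, and the resulting pages are
 * copied back into the original bio. The lbas in the shared metadata
 * buffer are saved before the sync I/O and restored afterwards so that
 * the completion check sees them in the original sector order.
 */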
static int pblk_partial_read_bio(struct pblk *pblk, struct nvm_rq *rqd,
				 unsigned int bio_init_idx,
				 unsigned long *read_bitmap)
{
	struct bio *new_bio, *bio = rqd->bio;
	struct pblk_sec_meta *meta_list = rqd->meta_list;
	struct bio_vec src_bv, dst_bv;
	void *ppa_ptr = NULL;
	void *src_p, *dst_p;
	dma_addr_t dma_ppa_list = 0;
	__le64 *lba_list_mem, *lba_list_media;
	int nr_secs = rqd->nr_ppas;
	int nr_holes = nr_secs - bitmap_weight(read_bitmap, nr_secs);
	int i, ret, hole;

	/* Re-use allocated memory for intermediate lbas */
	lba_list_mem = (((void *)rqd->ppa_list) + pblk_dma_ppa_size);
	lba_list_media = (((void *)rqd->ppa_list) + 2 * pblk_dma_ppa_size);

	new_bio = bio_alloc(GFP_KERNEL, nr_holes);

	if (pblk_bio_add_pages(pblk, new_bio, GFP_KERNEL, nr_holes))
		goto err;

	if (nr_holes != new_bio->bi_vcnt) {
		pr_err("pblk: malformed bio\n");
		goto err;
	}

	for (i = 0; i < nr_secs; i++)
		lba_list_mem[i] = meta_list[i].lba;

	new_bio->bi_iter.bi_sector = 0; /* internal bio */
	bio_set_op_attrs(new_bio, REQ_OP_READ, 0);

	rqd->bio = new_bio;
	rqd->nr_ppas = nr_holes;
	rqd->flags = pblk_set_read_mode(pblk, PBLK_READ_RANDOM);

	if (unlikely(nr_holes == 1)) {
		ppa_ptr = rqd->ppa_list;
		dma_ppa_list = rqd->dma_ppa_list;
		rqd->ppa_addr = rqd->ppa_list[0];
	}

	ret = pblk_submit_io_sync(pblk, rqd);
	if (ret) {
		bio_put(rqd->bio);
		pr_err("pblk: sync read IO submission failed\n");
		goto err;
	}

	if (rqd->error) {
		atomic_long_inc(&pblk->read_failed);
#ifdef CONFIG_NVM_DEBUG
		pblk_print_failed_rqd(pblk, rqd, rqd->error);
#endif
	}

	if (unlikely(nr_holes == 1)) {
		struct ppa_addr ppa;

		ppa = rqd->ppa_addr;
		rqd->ppa_list = ppa_ptr;
		rqd->dma_ppa_list = dma_ppa_list;
		rqd->ppa_list[0] = ppa;
	}

	for (i = 0; i < nr_secs; i++) {
		lba_list_media[i] = meta_list[i].lba;
		meta_list[i].lba = lba_list_mem[i];
	}

	/* Fill the holes in the original bio */
	i = 0;
	hole = find_first_zero_bit(read_bitmap, nr_secs);
	do {
		int line_id = pblk_ppa_to_line(rqd->ppa_list[i]);
		struct pblk_line *line = &pblk->lines[line_id];

		kref_put(&line->ref, pblk_line_put);

		meta_list[hole].lba = lba_list_media[i];

		src_bv = new_bio->bi_io_vec[i++];
		dst_bv = bio->bi_io_vec[bio_init_idx + hole];

		src_p = kmap_atomic(src_bv.bv_page);
		dst_p = kmap_atomic(dst_bv.bv_page);

		memcpy(dst_p + dst_bv.bv_offset,
			src_p + src_bv.bv_offset,
			PBLK_EXPOSED_PAGE_SIZE);

		kunmap_atomic(src_p);
		kunmap_atomic(dst_p);

		mempool_free(src_bv.bv_page, &pblk->page_bio_pool);

		hole = find_next_zero_bit(read_bitmap, nr_secs, hole + 1);
	} while (hole < nr_secs);

	bio_put(new_bio);

	/* Complete the original bio and associated request */
	bio_endio(bio);
	rqd->bio = bio;
	rqd->nr_ppas = nr_secs;

	__pblk_end_io_read(pblk, rqd, false);
	return NVM_IO_OK;

err:
	pr_err("pblk: failed to perform partial read\n");

	/* Free allocated pages in new bio */
	pblk_bio_free_pages(pblk, new_bio, 0, new_bio->bi_vcnt);
	__pblk_end_io_read(pblk, rqd, false);
	return NVM_IO_ERR;
}

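/* Single-sector version of pblk_read_ppalist_rq() */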
static void pblk_read_rq(struct pblk *pblk, struct nvm_rq *rqd,
			 sector_t lba, unsigned long *read_bitmap)
{
	struct pblk_sec_meta *meta_list = rqd->meta_list;
	struct bio *bio = rqd->bio;
	struct ppa_addr ppa;

	pblk_lookup_l2p_seq(pblk, &ppa, lba, 1);

#ifdef CONFIG_NVM_DEBUG
	atomic_long_inc(&pblk->inflight_reads);
#endif

retry:
	if (pblk_ppa_empty(ppa)) {
		WARN_ON(test_and_set_bit(0, read_bitmap));
		meta_list[0].lba = cpu_to_le64(ADDR_EMPTY);
		return;
	}

	/* Try to read from write buffer. The address is later checked on the
	 * write buffer to prevent retrieving overwritten data.
	 */
	if (pblk_addr_in_cache(ppa)) {
		if (!pblk_read_from_cache(pblk, bio, lba, ppa, 0, 1)) {
			pblk_lookup_l2p_seq(pblk, &ppa, lba, 1);
			goto retry;
		}

		WARN_ON(test_and_set_bit(0, read_bitmap));
		meta_list[0].lba = cpu_to_le64(lba);

#ifdef CONFIG_NVM_DEBUG
		atomic_long_inc(&pblk->cache_reads);
#endif
	} else {
		rqd->ppa_addr = ppa;
	}

	rqd->flags = pblk_set_read_mode(pblk, PBLK_READ_RANDOM);
}

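/*
 * Entry point for the read path. Depending on how much of the request
 * could be served from the write buffer, the bio is completed directly,
 * submitted to the device in full, or handled as a partial read.
 */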
int pblk_submit_read(struct pblk *pblk, struct bio *bio)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct request_queue *q = dev->q;
	sector_t blba = pblk_get_lba(bio);
	unsigned int nr_secs = pblk_get_secs(bio);
	struct pblk_g_ctx *r_ctx;
	struct nvm_rq *rqd;
	unsigned int bio_init_idx;
	unsigned long read_bitmap; /* Max 64 ppas per request */
	int ret = NVM_IO_ERR;

	/* logic error: lba out-of-bounds. Ignore read request */
	if (blba >= pblk->rl.nr_secs || nr_secs > PBLK_MAX_REQ_ADDRS) {
		WARN(1, "pblk: read lba out of bounds (lba:%llu, nr:%d)\n",
					(unsigned long long)blba, nr_secs);
		return NVM_IO_ERR;
	}

	generic_start_io_acct(q, READ, bio_sectors(bio), &pblk->disk->part0);

	bitmap_zero(&read_bitmap, nr_secs);

	rqd = pblk_alloc_rqd(pblk, PBLK_READ);

	rqd->opcode = NVM_OP_PREAD;
	rqd->bio = bio;
	rqd->nr_ppas = nr_secs;
	rqd->private = pblk;
	rqd->end_io = pblk_end_io_read;

	r_ctx = nvm_rq_to_pdu(rqd);
	r_ctx->start_time = jiffies;
	r_ctx->lba = blba;

	/* Save the index for this bio's start. This is needed in case
	 * we need to fill a partial read.
	 */
	bio_init_idx = pblk_get_bi_idx(bio);

	rqd->meta_list = nvm_dev_dma_alloc(dev->parent, GFP_KERNEL,
							&rqd->dma_meta_list);
	if (!rqd->meta_list) {
		pr_err("pblk: not able to allocate ppa list\n");
		goto fail_rqd_free;
	}

	if (nr_secs > 1) {
		rqd->ppa_list = rqd->meta_list + pblk_dma_meta_size;
		rqd->dma_ppa_list = rqd->dma_meta_list + pblk_dma_meta_size;

		pblk_read_ppalist_rq(pblk, rqd, blba, &read_bitmap);
	} else {
		pblk_read_rq(pblk, rqd, blba, &read_bitmap);
	}

	bio_get(bio);
	if (bitmap_full(&read_bitmap, nr_secs)) {
		bio_endio(bio);
		atomic_inc(&pblk->inflight_io);
		__pblk_end_io_read(pblk, rqd, false);
		return NVM_IO_OK;
	}

	/* All sectors are to be read from the device */
	if (bitmap_empty(&read_bitmap, rqd->nr_ppas)) {
		struct bio *int_bio = NULL;

		/* Clone read bio to deal with read errors internally */
		int_bio = bio_clone_fast(bio, GFP_KERNEL, &pblk_bio_set);
		if (!int_bio) {
			pr_err("pblk: could not clone read bio\n");
			goto fail_end_io;
		}

		rqd->bio = int_bio;
		r_ctx->private = bio;

		ret = pblk_submit_read_io(pblk, rqd);
		if (ret) {
			pr_err("pblk: read IO submission failed\n");
			if (int_bio)
				bio_put(int_bio);
			goto fail_end_io;
		}

		return NVM_IO_OK;
	}

	/* The read bio request could be partially filled by the write buffer,
	 * but there are some holes that need to be read from the drive.
	 */
	return pblk_partial_read_bio(pblk, rqd, bio_init_idx, &read_bitmap);

fail_rqd_free:
	pblk_free_rqd(pblk, rqd, PBLK_READ);
	return ret;

fail_end_io:
	__pblk_end_io_read(pblk, rqd, false);
	return ret;
}

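/*
 * GC multi-sector read mapping. The current L2P entry for each lba is
 * compared against the ppa the GC selected from the victim line; if the
 * sector was overwritten in the meantime, the entry is invalidated
 * (ADDR_EMPTY) so stale data is not relocated.
 */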
static int read_ppalist_rq_gc(struct pblk *pblk, struct nvm_rq *rqd,
			      struct pblk_line *line, u64 *lba_list,
			      u64 *paddr_list_gc, unsigned int nr_secs)
{
	struct ppa_addr ppa_list_l2p[PBLK_MAX_REQ_ADDRS];
	struct ppa_addr ppa_gc;
	int valid_secs = 0;
	int i;

	pblk_lookup_l2p_rand(pblk, ppa_list_l2p, lba_list, nr_secs);

	for (i = 0; i < nr_secs; i++) {
		if (lba_list[i] == ADDR_EMPTY)
			continue;

		ppa_gc = addr_to_gen_ppa(pblk, paddr_list_gc[i], line->id);
		if (!pblk_ppa_comp(ppa_list_l2p[i], ppa_gc)) {
			paddr_list_gc[i] = lba_list[i] = ADDR_EMPTY;
			continue;
		}

		rqd->ppa_list[valid_secs++] = ppa_list_l2p[i];
	}

#ifdef CONFIG_NVM_DEBUG
	atomic_long_add(valid_secs, &pblk->inflight_reads);
#endif

	return valid_secs;
}

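/* Single-sector GC read mapping, with an additional lba bounds check */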
static int read_rq_gc(struct pblk *pblk, struct nvm_rq *rqd,
		      struct pblk_line *line, sector_t lba,
		      u64 paddr_gc)
{
	struct ppa_addr ppa_l2p, ppa_gc;
	int valid_secs = 0;

	if (lba == ADDR_EMPTY)
		goto out;

	/* logic error: lba out-of-bounds */
	if (lba >= pblk->rl.nr_secs) {
		WARN(1, "pblk: read lba out of bounds\n");
		goto out;
	}

	spin_lock(&pblk->trans_lock);
	ppa_l2p = pblk_trans_map_get(pblk, lba);
	spin_unlock(&pblk->trans_lock);

	ppa_gc = addr_to_gen_ppa(pblk, paddr_gc, line->id);
	if (!pblk_ppa_comp(ppa_l2p, ppa_gc))
		goto out;

	rqd->ppa_addr = ppa_l2p;
	valid_secs = 1;

#ifdef CONFIG_NVM_DEBUG
	atomic_long_inc(&pblk->inflight_reads);
#endif

out:
	return valid_secs;
}

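/*
 * Synchronous read of victim-line data on behalf of the garbage collector
 * (its caller lives in pblk-gc.c). Once the device read completes,
 * pblk_read_check_rand() verifies the lba recorded in each sector's
 * metadata against the lba GC requested, catching relocation of stale or
 * misplaced data.
 */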
int pblk_submit_read_gc(struct pblk *pblk, struct pblk_gc_rq *gc_rq)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	struct bio *bio;
	struct nvm_rq rqd;
	int data_len;
	int ret = NVM_IO_OK;

	memset(&rqd, 0, sizeof(struct nvm_rq));

	rqd.meta_list = nvm_dev_dma_alloc(dev->parent, GFP_KERNEL,
							&rqd.dma_meta_list);
	if (!rqd.meta_list)
		return -ENOMEM;

	if (gc_rq->nr_secs > 1) {
		rqd.ppa_list = rqd.meta_list + pblk_dma_meta_size;
		rqd.dma_ppa_list = rqd.dma_meta_list + pblk_dma_meta_size;

		gc_rq->secs_to_gc = read_ppalist_rq_gc(pblk, &rqd, gc_rq->line,
							gc_rq->lba_list,
							gc_rq->paddr_list,
							gc_rq->nr_secs);
		if (gc_rq->secs_to_gc == 1)
			rqd.ppa_addr = rqd.ppa_list[0];
	} else {
		gc_rq->secs_to_gc = read_rq_gc(pblk, &rqd, gc_rq->line,
							gc_rq->lba_list[0],
							gc_rq->paddr_list[0]);
	}

	if (!(gc_rq->secs_to_gc))
		goto out;

	data_len = (gc_rq->secs_to_gc) * geo->csecs;
	bio = pblk_bio_map_addr(pblk, gc_rq->data, gc_rq->secs_to_gc, data_len,
						PBLK_VMALLOC_META, GFP_KERNEL);
	if (IS_ERR(bio)) {
		pr_err("pblk: could not allocate GC bio (%lu)\n", PTR_ERR(bio));
		goto err_free_dma;
	}

	bio->bi_iter.bi_sector = 0; /* internal bio */
	bio_set_op_attrs(bio, REQ_OP_READ, 0);

	rqd.opcode = NVM_OP_PREAD;
	rqd.nr_ppas = gc_rq->secs_to_gc;
	rqd.flags = pblk_set_read_mode(pblk, PBLK_READ_RANDOM);
	rqd.bio = bio;

	if (pblk_submit_io_sync(pblk, &rqd)) {
		ret = -EIO;
		pr_err("pblk: GC read request failed\n");
		goto err_free_bio;
	}

	pblk_read_check_rand(pblk, rqd.meta_list, gc_rq->lba_list, rqd.nr_ppas);

	atomic_dec(&pblk->inflight_io);

	if (rqd.error) {
		atomic_long_inc(&pblk->read_failed_gc);
#ifdef CONFIG_NVM_DEBUG
		pblk_print_failed_rqd(pblk, &rqd, rqd.error);
#endif
	}

#ifdef CONFIG_NVM_DEBUG
	atomic_long_add(gc_rq->secs_to_gc, &pblk->sync_reads);
	atomic_long_add(gc_rq->secs_to_gc, &pblk->recov_gc_reads);
	atomic_long_sub(gc_rq->secs_to_gc, &pblk->inflight_reads);
#endif

out:
	nvm_dev_dma_free(dev->parent, rqd.meta_list, rqd.dma_meta_list);
	return ret;

err_free_bio:
	bio_put(bio);
err_free_dma:
	nvm_dev_dma_free(dev->parent, rqd.meta_list, rqd.dma_meta_list);
	return ret;
}