/*
 * Copyright (C) 2016 CNEX Labs
 * Initial release: Javier Gonzalez <javier@cnexlabs.com>
 *                  Matias Bjorling <matias@cnexlabs.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * pblk-read.c - pblk's read path
 */

#include "pblk.h"

/*
 * There is no guarantee that the value read from cache has not been updated and
 * resides at another location in the cache. We guarantee though that if the
 * value is read from the cache, it belongs to the mapped lba. In order to
 * guarantee that writes and reads are ordered, a flush must be issued.
 */
static int pblk_read_from_cache(struct pblk *pblk, struct bio *bio,
				sector_t lba, struct ppa_addr ppa,
				int bio_iter, bool advanced_bio)
{
#ifdef CONFIG_NVM_PBLK_DEBUG
	/* Callers must ensure that the ppa points to a cache address */
	BUG_ON(pblk_ppa_empty(ppa));
	BUG_ON(!pblk_addr_in_cache(ppa));
#endif

	return pblk_rb_copy_to_bio(&pblk->rwb, bio, lba, ppa,
				   bio_iter, advanced_bio);
}

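/*
 * Build up a multi-sector read request. Sectors that are unmapped or
 * already served from the write buffer are flagged in read_bitmap and
 * carry their lba in the sideband metadata; only the remaining holes
 * are queued in rqd->ppa_list for an actual device read.
 */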
static void pblk_read_ppalist_rq(struct pblk *pblk, struct nvm_rq *rqd,
				 struct bio *bio, sector_t blba,
				 unsigned long *read_bitmap)
{
	struct pblk_sec_meta *meta_list = rqd->meta_list;
	struct ppa_addr ppas[PBLK_MAX_REQ_ADDRS];
	int nr_secs = rqd->nr_ppas;
	bool advanced_bio = false;
	int i, j = 0;

	pblk_lookup_l2p_seq(pblk, ppas, blba, nr_secs);

	for (i = 0; i < nr_secs; i++) {
		struct ppa_addr p = ppas[i];
		sector_t lba = blba + i;

retry:
		if (pblk_ppa_empty(p)) {
			WARN_ON(test_and_set_bit(i, read_bitmap));
			meta_list[i].lba = cpu_to_le64(ADDR_EMPTY);

			if (unlikely(!advanced_bio)) {
				bio_advance(bio, i * PBLK_EXPOSED_PAGE_SIZE);
				advanced_bio = true;
			}

			goto next;
		}

		/* Try to read from write buffer. The address is later checked
		 * on the write buffer to prevent retrieving overwritten data.
		 */
		if (pblk_addr_in_cache(p)) {
			if (!pblk_read_from_cache(pblk, bio, lba, p, i,
								advanced_bio)) {
				pblk_lookup_l2p_seq(pblk, &p, lba, 1);
				goto retry;
			}
			WARN_ON(test_and_set_bit(i, read_bitmap));
			meta_list[i].lba = cpu_to_le64(lba);
			advanced_bio = true;
#ifdef CONFIG_NVM_PBLK_DEBUG
			atomic_long_inc(&pblk->cache_reads);
#endif
		} else {
			/* Read from media non-cached sectors */
			rqd->ppa_list[j++] = p;
		}

next:
		if (advanced_bio)
			bio_advance(bio, PBLK_EXPOSED_PAGE_SIZE);
	}

	if (pblk_io_aligned(pblk, nr_secs))
		rqd->flags = pblk_set_read_mode(pblk, PBLK_READ_SEQUENTIAL);
	else
		rqd->flags = pblk_set_read_mode(pblk, PBLK_READ_RANDOM);

#ifdef CONFIG_NVM_PBLK_DEBUG
	atomic_long_add(nr_secs, &pblk->inflight_reads);
#endif
}

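/*
 * Compare the lbas recorded in a completed sequential read's sideband
 * metadata against the lbas that were requested; a mismatch indicates a
 * corrupted (misdirected) read.
 */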
static void pblk_read_check_seq(struct pblk *pblk, struct nvm_rq *rqd,
				sector_t blba)
{
	struct pblk_sec_meta *meta_lba_list = rqd->meta_list;
	int nr_lbas = rqd->nr_ppas;
	int i;

	for (i = 0; i < nr_lbas; i++) {
		u64 lba = le64_to_cpu(meta_lba_list[i].lba);

		if (lba == ADDR_EMPTY)
			continue;

		if (lba != blba + i) {
#ifdef CONFIG_NVM_PBLK_DEBUG
			struct ppa_addr *p;

			/* A single-sector request keeps its ppa inline in
			 * ppa_addr; multi-sector requests use ppa_list.
			 */
			p = (nr_lbas == 1) ? &rqd->ppa_addr : &rqd->ppa_list[i];
			print_ppa(&pblk->dev->geo, p, "seq", i);
#endif
			pr_err("pblk: corrupted read LBA (%llu/%llu)\n",
							lba, (u64)blba + i);
			WARN_ON(1);
		}
	}
}

/*
 * There can be holes in the lba list.
 */
static void pblk_read_check_rand(struct pblk *pblk, struct nvm_rq *rqd,
				 u64 *lba_list, int nr_lbas)
{
	struct pblk_sec_meta *meta_lba_list = rqd->meta_list;
	int i, j;

	for (i = 0, j = 0; i < nr_lbas; i++) {
		u64 lba = lba_list[i];
		u64 meta_lba;

		if (lba == ADDR_EMPTY)
			continue;

		meta_lba = le64_to_cpu(meta_lba_list[j].lba);

		if (lba != meta_lba) {
#ifdef CONFIG_NVM_PBLK_DEBUG
			struct ppa_addr *p;
			int nr_ppas = rqd->nr_ppas;

			p = (nr_ppas == 1) ? &rqd->ppa_addr : &rqd->ppa_list[j];
			print_ppa(&pblk->dev->geo, p, "rnd", j);
#endif
			pr_err("pblk: corrupted read LBA (%llu/%llu)\n",
								lba, meta_lba);
			WARN_ON(1);
		}

		j++;
	}

	WARN_ONCE(j != rqd->nr_ppas, "pblk: corrupted random request\n");
}

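/*
 * Every mapped sector in a read holds a reference on its pblk_line
 * (taken when the l2p table is consulted) so the line cannot be
 * recycled while the read is in flight; release those references here.
 */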
static void pblk_read_put_rqd_kref(struct pblk *pblk, struct nvm_rq *rqd)
{
	struct ppa_addr *ppa_list;
	int i;

	ppa_list = (rqd->nr_ppas > 1) ? rqd->ppa_list : &rqd->ppa_addr;

	for (i = 0; i < rqd->nr_ppas; i++) {
		struct ppa_addr ppa = ppa_list[i];
		struct pblk_line *line;

		line = &pblk->lines[pblk_ppa_to_line(ppa)];
		kref_put(&line->ref, pblk_line_put_wq);
	}
}

static void pblk_end_user_read(struct bio *bio)
{
#ifdef CONFIG_NVM_PBLK_DEBUG
	WARN_ONCE(bio->bi_status, "pblk: corrupted read bio\n");
#endif
	bio_endio(bio);
}

static void __pblk_end_io_read(struct pblk *pblk, struct nvm_rq *rqd,
			       bool put_line)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct pblk_g_ctx *r_ctx = nvm_rq_to_pdu(rqd);
	struct bio *int_bio = rqd->bio;
	unsigned long start_time = r_ctx->start_time;

	generic_end_io_acct(dev->q, READ, &pblk->disk->part0, start_time);

	if (rqd->error)
		pblk_log_read_err(pblk, rqd);

	pblk_read_check_seq(pblk, rqd, r_ctx->lba);

	if (int_bio)
		bio_put(int_bio);

	if (put_line)
		pblk_read_put_rqd_kref(pblk, rqd);

#ifdef CONFIG_NVM_PBLK_DEBUG
	atomic_long_add(rqd->nr_ppas, &pblk->sync_reads);
	atomic_long_sub(rqd->nr_ppas, &pblk->inflight_reads);
#endif

	pblk_free_rqd(pblk, rqd, PBLK_READ);
	atomic_dec(&pblk->inflight_io);
}

static void pblk_end_io_read(struct nvm_rq *rqd)
{
	struct pblk *pblk = rqd->private;
	struct pblk_g_ctx *r_ctx = nvm_rq_to_pdu(rqd);
	struct bio *bio = (struct bio *)r_ctx->private;

	pblk_end_user_read(bio);
	__pblk_end_io_read(pblk, rqd, true);
}

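/*
 * Partially cached request: allocate a private bio for the sectors that
 * missed the write buffer ("holes"), read them synchronously from the
 * device, and then copy each page back into the original bio at the
 * offset recorded by read_bitmap.
 */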
static int pblk_partial_read(struct pblk *pblk, struct nvm_rq *rqd,
			     struct bio *orig_bio, unsigned int bio_init_idx,
			     unsigned long *read_bitmap)
{
	struct pblk_sec_meta *meta_list = rqd->meta_list;
	struct bio *new_bio;
	struct bio_vec src_bv, dst_bv;
	void *ppa_ptr = NULL;
	void *src_p, *dst_p;
	dma_addr_t dma_ppa_list = 0;
	__le64 *lba_list_mem, *lba_list_media;
	int nr_secs = rqd->nr_ppas;
	int nr_holes = nr_secs - bitmap_weight(read_bitmap, nr_secs);
	int i, ret, hole;

	/* Re-use allocated memory for intermediate lbas */
	lba_list_mem = (((void *)rqd->ppa_list) + pblk_dma_ppa_size);
	lba_list_media = (((void *)rqd->ppa_list) + 2 * pblk_dma_ppa_size);

	new_bio = bio_alloc(GFP_KERNEL, nr_holes);

	if (pblk_bio_add_pages(pblk, new_bio, GFP_KERNEL, nr_holes))
		goto fail_add_pages;

	if (nr_holes != new_bio->bi_vcnt) {
		pr_err("pblk: malformed bio\n");
		goto fail;
	}

	/* Stash the lbas of the cached sectors; the device read below
	 * overwrites the request's sideband metadata.
	 */
	for (i = 0; i < nr_secs; i++)
		lba_list_mem[i] = meta_list[i].lba;

	new_bio->bi_iter.bi_sector = 0; /* internal bio */
	bio_set_op_attrs(new_bio, REQ_OP_READ, 0);

	rqd->bio = new_bio;
	rqd->nr_ppas = nr_holes;
	rqd->flags = pblk_set_read_mode(pblk, PBLK_READ_RANDOM);

	if (unlikely(nr_holes == 1)) {
		ppa_ptr = rqd->ppa_list;
		dma_ppa_list = rqd->dma_ppa_list;
		rqd->ppa_addr = rqd->ppa_list[0];
	}

	ret = pblk_submit_io_sync(pblk, rqd);
	if (ret) {
		pr_err("pblk: sync read IO submission failed\n");
		goto fail;
	}

	if (rqd->error) {
		atomic_long_inc(&pblk->read_failed);
#ifdef CONFIG_NVM_PBLK_DEBUG
		pblk_print_failed_rqd(pblk, rqd, rqd->error);
#endif
	}

	if (unlikely(nr_holes == 1)) {
		struct ppa_addr ppa;

		ppa = rqd->ppa_addr;
		rqd->ppa_list = ppa_ptr;
		rqd->dma_ppa_list = dma_ppa_list;
		rqd->ppa_list[0] = ppa;
	}

	for (i = 0; i < nr_secs; i++) {
		lba_list_media[i] = meta_list[i].lba;
		meta_list[i].lba = lba_list_mem[i];
	}

	/* Fill the holes in the original bio */
	i = 0;
	hole = find_first_zero_bit(read_bitmap, nr_secs);
	do {
		int line_id = pblk_ppa_to_line(rqd->ppa_list[i]);
		struct pblk_line *line = &pblk->lines[line_id];

		kref_put(&line->ref, pblk_line_put);

		meta_list[hole].lba = lba_list_media[i];

		src_bv = new_bio->bi_io_vec[i++];
		dst_bv = orig_bio->bi_io_vec[bio_init_idx + hole];

		src_p = kmap_atomic(src_bv.bv_page);
		dst_p = kmap_atomic(dst_bv.bv_page);

		memcpy(dst_p + dst_bv.bv_offset,
			src_p + src_bv.bv_offset,
			PBLK_EXPOSED_PAGE_SIZE);

		kunmap_atomic(src_p);
		kunmap_atomic(dst_p);

		mempool_free(src_bv.bv_page, &pblk->page_bio_pool);

		hole = find_next_zero_bit(read_bitmap, nr_secs, hole + 1);
	} while (hole < nr_secs);

	bio_put(new_bio);

	/* restore original request */
	rqd->bio = NULL;
	rqd->nr_ppas = nr_secs;

	__pblk_end_io_read(pblk, rqd, false);
	return NVM_IO_DONE;

fail:
	/* Free allocated pages in new bio */
	pblk_bio_free_pages(pblk, new_bio, 0, new_bio->bi_vcnt);
fail_add_pages:
	bio_put(new_bio);
	pr_err("pblk: failed to perform partial read\n");

	/* Restore the original request so that the completion path does
	 * not drop a second reference on the internal bio.
	 */
	rqd->bio = NULL;
	rqd->nr_ppas = nr_secs;

	__pblk_end_io_read(pblk, rqd, false);
	return NVM_IO_ERR;
}

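/*
 * Single-sector counterpart of pblk_read_ppalist_rq: resolve one lba
 * and either serve it from the write buffer or set up a device read.
 */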
static void pblk_read_rq(struct pblk *pblk, struct nvm_rq *rqd, struct bio *bio,
			 sector_t lba, unsigned long *read_bitmap)
{
	struct pblk_sec_meta *meta_list = rqd->meta_list;
	struct ppa_addr ppa;

	pblk_lookup_l2p_seq(pblk, &ppa, lba, 1);

#ifdef CONFIG_NVM_PBLK_DEBUG
	atomic_long_inc(&pblk->inflight_reads);
#endif

retry:
	if (pblk_ppa_empty(ppa)) {
		WARN_ON(test_and_set_bit(0, read_bitmap));
		meta_list[0].lba = cpu_to_le64(ADDR_EMPTY);
		return;
	}

	/* Try to read from write buffer. The address is later checked on the
	 * write buffer to prevent retrieving overwritten data.
	 */
	if (pblk_addr_in_cache(ppa)) {
		if (!pblk_read_from_cache(pblk, bio, lba, ppa, 0, 1)) {
			pblk_lookup_l2p_seq(pblk, &ppa, lba, 1);
			goto retry;
		}

		WARN_ON(test_and_set_bit(0, read_bitmap));
		meta_list[0].lba = cpu_to_le64(lba);

#ifdef CONFIG_NVM_PBLK_DEBUG
		atomic_long_inc(&pblk->cache_reads);
#endif
	} else {
		rqd->ppa_addr = ppa;
	}

	rqd->flags = pblk_set_read_mode(pblk, PBLK_READ_RANDOM);
}

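/*
 * Entry point for user reads. After the l2p pass, read_bitmap decides
 * the completion path, e.g. for a 4-sector request:
 *
 *   read_bitmap = 1111 -> fully served from cache, complete immediately
 *   read_bitmap = 0000 -> clone the bio and submit it to the device
 *   read_bitmap = 0101 -> pblk_partial_read() fills the holes
 */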
int pblk_submit_read(struct pblk *pblk, struct bio *bio)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct request_queue *q = dev->q;
	sector_t blba = pblk_get_lba(bio);
	unsigned int nr_secs = pblk_get_secs(bio);
	struct pblk_g_ctx *r_ctx;
	struct nvm_rq *rqd;
	unsigned int bio_init_idx;
	DECLARE_BITMAP(read_bitmap, NVM_MAX_VLBA);
	int ret = NVM_IO_ERR;

	/* logic error: lba out-of-bounds. Ignore read request */
	if (blba >= pblk->rl.nr_secs || nr_secs > PBLK_MAX_REQ_ADDRS) {
		WARN(1, "pblk: read lba out of bounds (lba:%llu, nr:%d)\n",
					(unsigned long long)blba, nr_secs);
		return NVM_IO_ERR;
	}

	generic_start_io_acct(q, READ, bio_sectors(bio), &pblk->disk->part0);

	bitmap_zero(read_bitmap, nr_secs);

	rqd = pblk_alloc_rqd(pblk, PBLK_READ);

	rqd->opcode = NVM_OP_PREAD;
	rqd->nr_ppas = nr_secs;
	rqd->bio = NULL; /* cloned bio if needed */
	rqd->private = pblk;
	rqd->end_io = pblk_end_io_read;

	r_ctx = nvm_rq_to_pdu(rqd);
	r_ctx->start_time = jiffies;
	r_ctx->lba = blba;
	r_ctx->private = bio; /* original bio */

	/* Save the index for this bio's start. This is needed in case
	 * we need to fill a partial read.
	 */
	bio_init_idx = pblk_get_bi_idx(bio);

	rqd->meta_list = nvm_dev_dma_alloc(dev->parent, GFP_KERNEL,
							&rqd->dma_meta_list);
	if (!rqd->meta_list) {
		pr_err("pblk: not able to allocate dma meta list\n");
		goto fail_rqd_free;
	}

	if (nr_secs > 1) {
		rqd->ppa_list = rqd->meta_list + pblk_dma_meta_size;
		rqd->dma_ppa_list = rqd->dma_meta_list + pblk_dma_meta_size;

		pblk_read_ppalist_rq(pblk, rqd, bio, blba, read_bitmap);
	} else {
		pblk_read_rq(pblk, rqd, bio, blba, read_bitmap);
	}

	if (bitmap_full(read_bitmap, nr_secs)) {
		atomic_inc(&pblk->inflight_io);
		__pblk_end_io_read(pblk, rqd, false);
		return NVM_IO_DONE;
	}

	/* All sectors are to be read from the device */
	if (bitmap_empty(read_bitmap, rqd->nr_ppas)) {
		struct bio *int_bio = NULL;

		/* Clone read bio to deal with read errors internally */
		int_bio = bio_clone_fast(bio, GFP_KERNEL, &pblk_bio_set);
		if (!int_bio) {
			pr_err("pblk: could not clone read bio\n");
			goto fail_end_io;
		}

		rqd->bio = int_bio;

		if (pblk_submit_io(pblk, rqd)) {
			pr_err("pblk: read IO submission failed\n");
			ret = NVM_IO_ERR;
			goto fail_end_io;
		}

		return NVM_IO_OK;
	}

	/* The read bio request could be partially filled by the write buffer,
	 * but there are some holes that need to be read from the drive.
	 */
	return pblk_partial_read(pblk, rqd, bio, bio_init_idx, read_bitmap);

fail_rqd_free:
	pblk_free_rqd(pblk, rqd, PBLK_READ);
	return ret;
fail_end_io:
	__pblk_end_io_read(pblk, rqd, false);
	return ret;
}

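/*
 * GC reads verify each victim sector against the current l2p mapping:
 * if the lba no longer maps to the expected ppa on the victim line, the
 * sector was overwritten in the meantime and is dropped from the
 * request (marked ADDR_EMPTY) instead of being relocated.
 */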
static int read_ppalist_rq_gc(struct pblk *pblk, struct nvm_rq *rqd,
			      struct pblk_line *line, u64 *lba_list,
			      u64 *paddr_list_gc, unsigned int nr_secs)
{
	struct ppa_addr ppa_list_l2p[PBLK_MAX_REQ_ADDRS];
	struct ppa_addr ppa_gc;
	int valid_secs = 0;
	int i;

	pblk_lookup_l2p_rand(pblk, ppa_list_l2p, lba_list, nr_secs);

	for (i = 0; i < nr_secs; i++) {
		if (lba_list[i] == ADDR_EMPTY)
			continue;

		ppa_gc = addr_to_gen_ppa(pblk, paddr_list_gc[i], line->id);
		if (!pblk_ppa_comp(ppa_list_l2p[i], ppa_gc)) {
			paddr_list_gc[i] = lba_list[i] = ADDR_EMPTY;
			continue;
		}

		rqd->ppa_list[valid_secs++] = ppa_list_l2p[i];
	}

#ifdef CONFIG_NVM_PBLK_DEBUG
	atomic_long_add(valid_secs, &pblk->inflight_reads);
#endif

	return valid_secs;
}

static int read_rq_gc(struct pblk *pblk, struct nvm_rq *rqd,
		      struct pblk_line *line, sector_t lba,
		      u64 paddr_gc)
{
	struct ppa_addr ppa_l2p, ppa_gc;
	int valid_secs = 0;

	if (lba == ADDR_EMPTY)
		goto out;

	/* logic error: lba out-of-bounds */
	if (lba >= pblk->rl.nr_secs) {
		WARN(1, "pblk: read lba out of bounds\n");
		goto out;
	}

	spin_lock(&pblk->trans_lock);
	ppa_l2p = pblk_trans_map_get(pblk, lba);
	spin_unlock(&pblk->trans_lock);

	ppa_gc = addr_to_gen_ppa(pblk, paddr_gc, line->id);
	if (!pblk_ppa_comp(ppa_l2p, ppa_gc))
		goto out;

	rqd->ppa_addr = ppa_l2p;
	valid_secs = 1;

#ifdef CONFIG_NVM_PBLK_DEBUG
	atomic_long_inc(&pblk->inflight_reads);
#endif

out:
	return valid_secs;
}

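/*
 * Synchronous GC read: valid sectors are read into gc_rq->data, which
 * is a vmalloc'ed buffer, hence the PBLK_VMALLOC_META mapping when
 * building the bio below.
 */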
int pblk_submit_read_gc(struct pblk *pblk, struct pblk_gc_rq *gc_rq)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	struct bio *bio;
	struct nvm_rq rqd;
	int data_len;
	int ret = NVM_IO_OK;

	memset(&rqd, 0, sizeof(struct nvm_rq));

	rqd.meta_list = nvm_dev_dma_alloc(dev->parent, GFP_KERNEL,
							&rqd.dma_meta_list);
	if (!rqd.meta_list)
		return -ENOMEM;

	if (gc_rq->nr_secs > 1) {
		rqd.ppa_list = rqd.meta_list + pblk_dma_meta_size;
		rqd.dma_ppa_list = rqd.dma_meta_list + pblk_dma_meta_size;

		gc_rq->secs_to_gc = read_ppalist_rq_gc(pblk, &rqd, gc_rq->line,
							gc_rq->lba_list,
							gc_rq->paddr_list,
							gc_rq->nr_secs);
		if (gc_rq->secs_to_gc == 1)
			rqd.ppa_addr = rqd.ppa_list[0];
	} else {
		gc_rq->secs_to_gc = read_rq_gc(pblk, &rqd, gc_rq->line,
							gc_rq->lba_list[0],
							gc_rq->paddr_list[0]);
	}

	if (!(gc_rq->secs_to_gc))
		goto out;

	data_len = (gc_rq->secs_to_gc) * geo->csecs;
	bio = pblk_bio_map_addr(pblk, gc_rq->data, gc_rq->secs_to_gc, data_len,
						PBLK_VMALLOC_META, GFP_KERNEL);
	if (IS_ERR(bio)) {
		pr_err("pblk: could not allocate GC bio (%lu)\n", PTR_ERR(bio));
		goto err_free_dma;
	}

	bio->bi_iter.bi_sector = 0; /* internal bio */
	bio_set_op_attrs(bio, REQ_OP_READ, 0);

	rqd.opcode = NVM_OP_PREAD;
	rqd.nr_ppas = gc_rq->secs_to_gc;
	rqd.flags = pblk_set_read_mode(pblk, PBLK_READ_RANDOM);
	rqd.bio = bio;

	if (pblk_submit_io_sync(pblk, &rqd)) {
		ret = -EIO;
		pr_err("pblk: GC read request failed\n");
		goto err_free_bio;
	}

	pblk_read_check_rand(pblk, &rqd, gc_rq->lba_list, gc_rq->nr_secs);

	atomic_dec(&pblk->inflight_io);

	if (rqd.error) {
		atomic_long_inc(&pblk->read_failed_gc);
#ifdef CONFIG_NVM_PBLK_DEBUG
		pblk_print_failed_rqd(pblk, &rqd, rqd.error);
#endif
	}

#ifdef CONFIG_NVM_PBLK_DEBUG
	atomic_long_add(gc_rq->secs_to_gc, &pblk->sync_reads);
	atomic_long_add(gc_rq->secs_to_gc, &pblk->recov_gc_reads);
	atomic_long_sub(gc_rq->secs_to_gc, &pblk->inflight_reads);
#endif

out:
	nvm_dev_dma_free(dev->parent, rqd.meta_list, rqd.dma_meta_list);
	return ret;

err_free_bio:
	bio_put(bio);
err_free_dma:
	nvm_dev_dma_free(dev->parent, rqd.meta_list, rqd.dma_meta_list);
	return ret;
}