lightnvm: pblk: refactor read path on GC
[linux-2.6-block.git] / drivers/lightnvm/pblk-write.c
/*
 * Copyright (C) 2016 CNEX Labs
 * Initial release: Javier Gonzalez <javier@cnexlabs.com>
 *                  Matias Bjorling <matias@cnexlabs.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * pblk-write.c - pblk's write path from write buffer to media
 */

#include "pblk.h"

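/*
 * Complete a data write on the buffer side: end the original user bios
 * attached to each synced write buffer entry, free any pages that were
 * added as padding, advance the ring buffer sync pointer and release the
 * request resources. Returns the updated sync position in the buffer.
 */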
static unsigned long pblk_end_w_bio(struct pblk *pblk, struct nvm_rq *rqd,
				    struct pblk_c_ctx *c_ctx)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct bio *original_bio;
	unsigned long ret;
	int i;

	for (i = 0; i < c_ctx->nr_valid; i++) {
		struct pblk_w_ctx *w_ctx;

		w_ctx = pblk_rb_w_ctx(&pblk->rwb, c_ctx->sentry + i);
		while ((original_bio = bio_list_pop(&w_ctx->bios)))
			bio_endio(original_bio);
	}

	if (c_ctx->nr_padded)
		pblk_bio_free_pages(pblk, rqd->bio, c_ctx->nr_valid,
				    c_ctx->nr_padded);

#ifdef CONFIG_NVM_DEBUG
	atomic_long_add(rqd->nr_ppas, &pblk->sync_writes);
#endif

	ret = pblk_rb_sync_advance(&pblk->rwb, c_ctx->nr_valid);

	nvm_dev_dma_free(dev->parent, rqd->meta_list, rqd->dma_meta_list);

	bio_put(rqd->bio);
	pblk_free_rqd(pblk, rqd, WRITE);

	return ret;
}

static unsigned long pblk_end_queued_w_bio(struct pblk *pblk,
					   struct nvm_rq *rqd,
					   struct pblk_c_ctx *c_ctx)
{
	list_del(&c_ctx->list);
	return pblk_end_w_bio(pblk, rqd, c_ctx);
}

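/*
 * Process a write completion in ring buffer order. Completions may only
 * advance the sync pointer in the order in which entries were committed;
 * out-of-order completions are parked on pblk->compl_list and drained
 * once the preceding requests have completed.
 */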
static void pblk_complete_write(struct pblk *pblk, struct nvm_rq *rqd,
				struct pblk_c_ctx *c_ctx)
{
	struct pblk_c_ctx *c, *r;
	unsigned long flags;
	unsigned long pos;

#ifdef CONFIG_NVM_DEBUG
	atomic_long_sub(c_ctx->nr_valid, &pblk->inflight_writes);
#endif

	pblk_up_rq(pblk, rqd->ppa_list, rqd->nr_ppas, c_ctx->lun_bitmap);

	pos = pblk_rb_sync_init(&pblk->rwb, &flags);
	if (pos == c_ctx->sentry) {
		pos = pblk_end_w_bio(pblk, rqd, c_ctx);

retry:
		list_for_each_entry_safe(c, r, &pblk->compl_list, list) {
			rqd = nvm_rq_from_c_ctx(c);
			if (c->sentry == pos) {
				pos = pblk_end_queued_w_bio(pblk, rqd, c);
				goto retry;
			}
		}
	} else {
		WARN_ON(nvm_rq_from_c_ctx(c_ctx) != rqd);
		list_add_tail(&c_ctx->list, &pblk->compl_list);
	}
	pblk_rb_sync_end(&pblk->rwb, &flags);
}

/* When a write fails, we are not sure whether the block has grown bad or a page
 * range is more susceptible to write errors. If a high number of pages fail, we
 * assume that the block is bad and we mark it accordingly. In all cases, we
 * remap and resubmit the failed entries as fast as possible; if a flush is
 * waiting on a completion, the whole stack would stall otherwise.
 */
static void pblk_end_w_fail(struct pblk *pblk, struct nvm_rq *rqd)
{
	void *comp_bits = &rqd->ppa_status;
	struct pblk_c_ctx *c_ctx = nvm_rq_to_pdu(rqd);
	struct pblk_rec_ctx *recovery;
	struct ppa_addr *ppa_list = rqd->ppa_list;
	int nr_ppas = rqd->nr_ppas;
	unsigned int c_entries;
	int bit, ret;

	if (unlikely(nr_ppas == 1))
		ppa_list = &rqd->ppa_addr;

	recovery = mempool_alloc(pblk->rec_pool, GFP_ATOMIC);

	INIT_LIST_HEAD(&recovery->failed);

	bit = -1;
	while ((bit = find_next_bit(comp_bits, nr_ppas, bit + 1)) < nr_ppas) {
		struct pblk_rb_entry *entry;
		struct ppa_addr ppa;

		/* Logic error */
		if (bit > c_ctx->nr_valid) {
			WARN_ONCE(1, "pblk: corrupted write request\n");
			mempool_free(recovery, pblk->rec_pool);
			goto out;
		}

		ppa = ppa_list[bit];
		entry = pblk_rb_sync_scan_entry(&pblk->rwb, &ppa);
		if (!entry) {
			pr_err("pblk: could not scan entry on write failure\n");
			mempool_free(recovery, pblk->rec_pool);
			goto out;
		}

		/* The list is filled first and emptied afterwards. No need for
		 * protecting it with a lock
		 */
		list_add_tail(&entry->index, &recovery->failed);
	}

	c_entries = find_first_bit(comp_bits, nr_ppas);
	ret = pblk_recov_setup_rq(pblk, c_ctx, recovery, comp_bits, c_entries);
	if (ret) {
		pr_err("pblk: could not recover from write failure\n");
		mempool_free(recovery, pblk->rec_pool);
		goto out;
	}

	INIT_WORK(&recovery->ws_rec, pblk_submit_rec);
	queue_work(pblk->close_wq, &recovery->ws_rec);

out:
	pblk_complete_write(pblk, rqd, c_ctx);
}

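/*
 * End I/O callback for data writes: log and recover from device errors,
 * then complete the request against the write buffer.
 */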
static void pblk_end_io_write(struct nvm_rq *rqd)
{
	struct pblk *pblk = rqd->private;
	struct pblk_c_ctx *c_ctx = nvm_rq_to_pdu(rqd);

	if (rqd->error) {
		pblk_log_write_err(pblk, rqd);
		return pblk_end_w_fail(pblk, rqd);
	}
#ifdef CONFIG_NVM_DEBUG
	else
		WARN_ONCE(rqd->bio->bi_status, "pblk: corrupted write error\n");
#endif

	pblk_complete_write(pblk, rqd, c_ctx);
	atomic_dec(&pblk->inflight_io);
}

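/*
 * End I/O callback for emeta (line metadata) writes: release the per-LUN
 * write semaphores, account the synced sectors and, once all of the
 * line's emeta has been persisted, schedule the work that closes the
 * line.
 */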
static void pblk_end_io_write_meta(struct nvm_rq *rqd)
{
	struct pblk *pblk = rqd->private;
	struct nvm_tgt_dev *dev = pblk->dev;
	struct pblk_g_ctx *m_ctx = nvm_rq_to_pdu(rqd);
	struct pblk_line *line = m_ctx->private;
	struct pblk_emeta *emeta = line->emeta;
	int sync;

	pblk_up_page(pblk, rqd->ppa_list, rqd->nr_ppas);

	if (rqd->error) {
		pblk_log_write_err(pblk, rqd);
		pr_err("pblk: metadata I/O failed. Line %d\n", line->id);
	}
#ifdef CONFIG_NVM_DEBUG
	else
		WARN_ONCE(rqd->bio->bi_status, "pblk: corrupted write error\n");
#endif

	sync = atomic_add_return(rqd->nr_ppas, &emeta->sync);
	if (sync == emeta->nr_entries)
		pblk_gen_run_ws(pblk, line, NULL, pblk_line_close_ws,
				GFP_ATOMIC, pblk->close_wq);

	bio_put(rqd->bio);
	nvm_dev_dma_free(dev->parent, rqd->meta_list, rqd->dma_meta_list);
	pblk_free_rqd(pblk, rqd, READ);

	atomic_dec(&pblk->inflight_io);
}

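/*
 * Common setup for a write request: program the command fields, install
 * the completion callback and allocate the DMA-able region that holds the
 * out-of-band metadata and the PPA list.
 */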
static int pblk_alloc_w_rq(struct pblk *pblk, struct nvm_rq *rqd,
			   unsigned int nr_secs,
			   nvm_end_io_fn(*end_io))
{
	struct nvm_tgt_dev *dev = pblk->dev;

	/* Setup write request */
	rqd->opcode = NVM_OP_PWRITE;
	rqd->nr_ppas = nr_secs;
	rqd->flags = pblk_set_progr_mode(pblk, WRITE);
	rqd->private = pblk;
	rqd->end_io = end_io;

	rqd->meta_list = nvm_dev_dma_alloc(dev->parent, GFP_KERNEL,
					   &rqd->dma_meta_list);
	if (!rqd->meta_list)
		return -ENOMEM;

	rqd->ppa_list = rqd->meta_list + pblk_dma_meta_size;
	rqd->dma_ppa_list = rqd->dma_meta_list + pblk_dma_meta_size;

	return 0;
}

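/*
 * Set up a data write request coming from the write buffer: allocate the
 * LUN bitmap used for completion accounting, map the buffer entries to
 * physical addresses and, if the next line still has blocks pending
 * erase, pick one erase candidate to be issued together with this write.
 */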
static int pblk_setup_w_rq(struct pblk *pblk, struct nvm_rq *rqd,
			   struct pblk_c_ctx *c_ctx, struct ppa_addr *erase_ppa)
{
	struct pblk_line_meta *lm = &pblk->lm;
	struct pblk_line *e_line = pblk_line_get_erase(pblk);
	unsigned int valid = c_ctx->nr_valid;
	unsigned int padded = c_ctx->nr_padded;
	unsigned int nr_secs = valid + padded;
	unsigned long *lun_bitmap;
	int ret = 0;

	lun_bitmap = kzalloc(lm->lun_bitmap_len, GFP_KERNEL);
	if (!lun_bitmap)
		return -ENOMEM;
	c_ctx->lun_bitmap = lun_bitmap;

	ret = pblk_alloc_w_rq(pblk, rqd, nr_secs, pblk_end_io_write);
	if (ret) {
		kfree(lun_bitmap);
		return ret;
	}

	if (likely(!e_line || !atomic_read(&e_line->left_eblks)))
		pblk_map_rq(pblk, rqd, c_ctx->sentry, lun_bitmap, valid, 0);
	else
		pblk_map_erase_rq(pblk, rqd, c_ctx->sentry, lun_bitmap,
				  valid, erase_ppa);

	return 0;
}

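/*
 * Set up a write request used by the recovery path. The caller provides
 * the number of sectors in rqd->nr_ppas; entries are mapped starting at
 * the sentry recorded in the completion context.
 */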
int pblk_setup_w_rec_rq(struct pblk *pblk, struct nvm_rq *rqd,
			struct pblk_c_ctx *c_ctx)
{
	struct pblk_line_meta *lm = &pblk->lm;
	unsigned long *lun_bitmap;
	int ret;

	lun_bitmap = kzalloc(lm->lun_bitmap_len, GFP_KERNEL);
	if (!lun_bitmap)
		return -ENOMEM;

	c_ctx->lun_bitmap = lun_bitmap;

	ret = pblk_alloc_w_rq(pblk, rqd, rqd->nr_ppas, pblk_end_io_write);
	if (ret)
		return ret;

	pblk_map_rq(pblk, rqd, c_ctx->sentry, lun_bitmap, c_ctx->nr_valid, 0);

	rqd->ppa_status = (u64)0;
	rqd->flags = pblk_set_progr_mode(pblk, WRITE);

	return ret;
}

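/*
 * Compute how many sectors to submit in the next write, sanity checking
 * the result against the sectors available in the buffer and any pending
 * flush when debugging is enabled.
 */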
static int pblk_calc_secs_to_sync(struct pblk *pblk, unsigned int secs_avail,
				  unsigned int secs_to_flush)
{
	int secs_to_sync;

	secs_to_sync = pblk_calc_secs(pblk, secs_avail, secs_to_flush);

#ifdef CONFIG_NVM_DEBUG
	if ((!secs_to_sync && secs_to_flush)
			|| (secs_to_sync < 0)
			|| (secs_to_sync > secs_avail && !secs_to_flush)) {
		pr_err("pblk: bad sector calculation (a:%d,s:%d,f:%d)\n",
				secs_avail, secs_to_sync, secs_to_flush);
	}
#endif

	return secs_to_sync;
}

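/*
 * Heuristic deciding whether an emeta write on @meta_line can be issued
 * alongside the data I/O described by @ppa_list without contending for
 * the same LUNs. Non-zero means the metadata write can be scheduled now;
 * zero means it should be deferred.
 */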
static inline int pblk_valid_meta_ppa(struct pblk *pblk,
				      struct pblk_line *meta_line,
				      struct ppa_addr *ppa_list, int nr_ppas)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	struct pblk_line *data_line;
	struct ppa_addr ppa, ppa_opt;
	u64 paddr;
	int i;

	data_line = &pblk->lines[pblk_dev_ppa_to_line(ppa_list[0])];
	paddr = pblk_lookup_page(pblk, meta_line);
	ppa = addr_to_gen_ppa(pblk, paddr, 0);

	if (test_bit(pblk_ppa_to_pos(geo, ppa), data_line->blk_bitmap))
		return 1;

	/* Schedule a metadata I/O that is half the distance from the data I/O
	 * with regards to the number of LUNs forming the pblk instance. This
	 * balances LUN conflicts across every I/O.
	 *
	 * When the LUN configuration changes (e.g., due to GC), this distance
	 * can align, which would result in a LUN deadlock. In this case, modify
	 * the distance to not be optimal, but allow metadata I/Os to succeed.
	 */
	ppa_opt = addr_to_gen_ppa(pblk, paddr + data_line->meta_distance, 0);
	if (unlikely(ppa_opt.ppa == ppa.ppa)) {
		data_line->meta_distance--;
		return 0;
	}

	for (i = 0; i < nr_ppas; i += pblk->min_write_pgs)
		if (ppa_list[i].g.ch == ppa_opt.g.ch &&
					ppa_list[i].g.lun == ppa_opt.g.lun)
			return 1;

	if (test_bit(pblk_ppa_to_pos(geo, ppa_opt), data_line->blk_bitmap)) {
		for (i = 0; i < nr_ppas; i += pblk->min_write_pgs)
			if (ppa_list[i].g.ch == ppa.g.ch &&
						ppa_list[i].g.lun == ppa.g.lun)
				return 0;

		return 1;
	}

	return 0;
}

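/*
 * Build and submit one emeta write for @meta_line: map the next chunk of
 * the emeta buffer into a bio, allocate pages on the metadata line, take
 * the corresponding LUN semaphores and issue the I/O. The line is removed
 * from the emeta list once its whole buffer has been mapped.
 */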
int pblk_submit_meta_io(struct pblk *pblk, struct pblk_line *meta_line)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	struct pblk_line_meta *lm = &pblk->lm;
	struct pblk_emeta *emeta = meta_line->emeta;
	struct pblk_g_ctx *m_ctx;
	struct bio *bio;
	struct nvm_rq *rqd;
	void *data;
	u64 paddr;
	int rq_ppas = pblk->min_write_pgs;
	int id = meta_line->id;
	int rq_len;
	int i, j;
	int ret;

	rqd = pblk_alloc_rqd(pblk, READ);

	m_ctx = nvm_rq_to_pdu(rqd);
	m_ctx->private = meta_line;

	rq_len = rq_ppas * geo->sec_size;
	data = ((void *)emeta->buf) + emeta->mem;

	bio = pblk_bio_map_addr(pblk, data, rq_ppas, rq_len,
				l_mg->emeta_alloc_type, GFP_KERNEL);
	if (IS_ERR(bio)) {
		ret = PTR_ERR(bio);
		goto fail_free_rqd;
	}
	bio->bi_iter.bi_sector = 0; /* internal bio */
	bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
	rqd->bio = bio;

	ret = pblk_alloc_w_rq(pblk, rqd, rq_ppas, pblk_end_io_write_meta);
	if (ret)
		goto fail_free_bio;

	for (i = 0; i < rqd->nr_ppas; ) {
		spin_lock(&meta_line->lock);
		paddr = __pblk_alloc_page(pblk, meta_line, rq_ppas);
		spin_unlock(&meta_line->lock);
		for (j = 0; j < rq_ppas; j++, i++, paddr++)
			rqd->ppa_list[i] = addr_to_gen_ppa(pblk, paddr, id);
	}

	emeta->mem += rq_len;
	if (emeta->mem >= lm->emeta_len[0]) {
		spin_lock(&l_mg->close_lock);
		list_del(&meta_line->list);
		spin_unlock(&l_mg->close_lock);
	}

	pblk_down_page(pblk, rqd->ppa_list, rqd->nr_ppas);

	ret = pblk_submit_io(pblk, rqd);
	if (ret) {
		pr_err("pblk: emeta I/O submission failed: %d\n", ret);
		goto fail_rollback;
	}

	return NVM_IO_OK;

fail_rollback:
	pblk_up_page(pblk, rqd->ppa_list, rqd->nr_ppas);
	spin_lock(&l_mg->close_lock);
	pblk_dealloc_page(pblk, meta_line, rq_ppas);
	list_add(&meta_line->list, &l_mg->emeta_list);
	spin_unlock(&l_mg->close_lock);

	nvm_dev_dma_free(dev->parent, rqd->meta_list, rqd->dma_meta_list);
fail_free_bio:
	if (likely(l_mg->emeta_alloc_type == PBLK_VMALLOC_META))
		bio_put(bio);
fail_free_rqd:
	pblk_free_rqd(pblk, rqd, READ);
	return ret;
}

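/*
 * Pick the oldest line with outstanding emeta and, if doing so does not
 * conflict with the data I/O about to be issued, submit a metadata write
 * for it.
 */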
static int pblk_sched_meta_io(struct pblk *pblk, struct ppa_addr *prev_list,
			      int prev_n)
{
	struct pblk_line_meta *lm = &pblk->lm;
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	struct pblk_line *meta_line;

	spin_lock(&l_mg->close_lock);
retry:
	if (list_empty(&l_mg->emeta_list)) {
		spin_unlock(&l_mg->close_lock);
		return 0;
	}
	meta_line = list_first_entry(&l_mg->emeta_list, struct pblk_line, list);
	if (meta_line->emeta->mem >= lm->emeta_len[0])
		goto retry;
	spin_unlock(&l_mg->close_lock);

	if (!pblk_valid_meta_ppa(pblk, meta_line, prev_list, prev_n))
		return 0;

	return pblk_submit_meta_io(pblk, meta_line);
}

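/*
 * Set up and submit one write request from the write buffer. If the
 * mapping did not reserve a block to erase on the next line, a pending
 * emeta write is scheduled alongside the data; otherwise the erase for
 * the next data line is issued after the data write.
 */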
static int pblk_submit_io_set(struct pblk *pblk, struct nvm_rq *rqd)
{
	struct pblk_c_ctx *c_ctx = nvm_rq_to_pdu(rqd);
	struct ppa_addr erase_ppa;
	int err;

	ppa_set_empty(&erase_ppa);

	/* Assign lbas to ppas and populate request structure */
	err = pblk_setup_w_rq(pblk, rqd, c_ctx, &erase_ppa);
	if (err) {
		pr_err("pblk: could not setup write request: %d\n", err);
		return NVM_IO_ERR;
	}

	if (likely(ppa_empty(erase_ppa))) {
		/* Submit metadata write for previous data line */
		err = pblk_sched_meta_io(pblk, rqd->ppa_list, rqd->nr_ppas);
		if (err) {
			pr_err("pblk: metadata I/O submission failed: %d\n", err);
			return NVM_IO_ERR;
		}

		/* Submit data write for current data line */
		err = pblk_submit_io(pblk, rqd);
		if (err) {
			pr_err("pblk: data I/O submission failed: %d\n", err);
			return NVM_IO_ERR;
		}
	} else {
		/* Submit data write for current data line */
		err = pblk_submit_io(pblk, rqd);
		if (err) {
			pr_err("pblk: data I/O submission failed: %d\n", err);
			return NVM_IO_ERR;
		}

		/* Submit available erase for next data line */
		if (pblk_blk_erase_async(pblk, erase_ppa)) {
			struct pblk_line *e_line = pblk_line_get_erase(pblk);
			struct nvm_tgt_dev *dev = pblk->dev;
			struct nvm_geo *geo = &dev->geo;
			int bit;

			atomic_inc(&e_line->left_eblks);
			bit = pblk_ppa_to_pos(geo, erase_ppa);
			WARN_ON(!test_and_clear_bit(bit, e_line->erase_bitmap));
		}
	}

	return NVM_IO_OK;
}

static void pblk_free_write_rqd(struct pblk *pblk, struct nvm_rq *rqd)
{
	struct pblk_c_ctx *c_ctx = nvm_rq_to_pdu(rqd);
	struct bio *bio = rqd->bio;

	if (c_ctx->nr_padded)
		pblk_bio_free_pages(pblk, bio, c_ctx->nr_valid,
				    c_ctx->nr_padded);
}

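/*
 * Main work function of the write thread: figure out how many sectors can
 * be written, commit them in the ring buffer, copy them into a bio and
 * submit the resulting request. Returns 1 when there is nothing to do or
 * the submission failed, so the caller can back off.
 */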
static int pblk_submit_write(struct pblk *pblk)
{
	struct bio *bio;
	struct nvm_rq *rqd;
	unsigned int secs_avail, secs_to_sync, secs_to_com;
	unsigned int secs_to_flush;
	unsigned long pos;

	/* If there are no sectors in the cache, flushes (bios without data)
	 * will be cleared on the cache threads
	 */
	secs_avail = pblk_rb_read_count(&pblk->rwb);
	if (!secs_avail)
		return 1;

	secs_to_flush = pblk_rb_sync_point_count(&pblk->rwb);
	if (!secs_to_flush && secs_avail < pblk->min_write_pgs)
		return 1;

	bio = bio_alloc(GFP_KERNEL, pblk->max_write_pgs);

	bio->bi_iter.bi_sector = 0; /* internal bio */
	bio_set_op_attrs(bio, REQ_OP_WRITE, 0);

	rqd = pblk_alloc_rqd(pblk, WRITE);
	rqd->bio = bio;

	secs_to_sync = pblk_calc_secs_to_sync(pblk, secs_avail, secs_to_flush);
	if (secs_to_sync > pblk->max_write_pgs) {
		pr_err("pblk: bad buffer sync calculation\n");
		goto fail_put_bio;
	}

	secs_to_com = (secs_to_sync > secs_avail) ? secs_avail : secs_to_sync;
	pos = pblk_rb_read_commit(&pblk->rwb, secs_to_com);

	if (pblk_rb_read_to_bio(&pblk->rwb, rqd, bio, pos, secs_to_sync,
				secs_avail)) {
		pr_err("pblk: corrupted write bio\n");
		goto fail_put_bio;
	}

	if (pblk_submit_io_set(pblk, rqd))
		goto fail_free_bio;

#ifdef CONFIG_NVM_DEBUG
	atomic_long_add(secs_to_sync, &pblk->sub_writes);
#endif

	return 0;

fail_free_bio:
	pblk_free_write_rqd(pblk, rqd);
fail_put_bio:
	bio_put(bio);
	pblk_free_rqd(pblk, rqd, WRITE);

	return 1;
}

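/*
 * Write thread main loop: keep submitting writes while there is work and
 * go to sleep (TASK_INTERRUPTIBLE) whenever pblk_submit_write() reports
 * nothing to do.
 */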
int pblk_write_ts(void *data)
{
	struct pblk *pblk = data;

	while (!kthread_should_stop()) {
		if (!pblk_submit_write(pblk))
			continue;
		set_current_state(TASK_INTERRUPTIBLE);
		io_schedule();
	}

	return 0;
}