lightnvm: pblk: remove unused parameters in pblk_up_rq
drivers/lightnvm/pblk-write.c

/*
 * Copyright (C) 2016 CNEX Labs
 * Initial release: Javier Gonzalez <javier@cnexlabs.com>
 *                  Matias Bjorling <matias@cnexlabs.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * pblk-write.c - pblk's write path from write buffer to media
 */

#include "pblk.h"

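/* Complete the write buffer entries covered by a request: clear flush
 * points, end the user bios waiting on each entry, free pages added as
 * padding and advance the write buffer's sync pointer.
 */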
static unsigned long pblk_end_w_bio(struct pblk *pblk, struct nvm_rq *rqd,
				    struct pblk_c_ctx *c_ctx)
{
	struct bio *original_bio;
	struct pblk_rb *rwb = &pblk->rwb;
	unsigned long ret;
	int i;

	for (i = 0; i < c_ctx->nr_valid; i++) {
		struct pblk_w_ctx *w_ctx;
		int pos = c_ctx->sentry + i;
		int flags;

		w_ctx = pblk_rb_w_ctx(rwb, pos);
		flags = READ_ONCE(w_ctx->flags);

		if (flags & PBLK_FLUSH_ENTRY) {
			flags &= ~PBLK_FLUSH_ENTRY;
			/* Release flags on context. Protect from writes */
			smp_store_release(&w_ctx->flags, flags);

#ifdef CONFIG_NVM_PBLK_DEBUG
			atomic_dec(&rwb->inflight_flush_point);
#endif
		}

		while ((original_bio = bio_list_pop(&w_ctx->bios)))
			bio_endio(original_bio);
	}

	if (c_ctx->nr_padded)
		pblk_bio_free_pages(pblk, rqd->bio, c_ctx->nr_valid,
							c_ctx->nr_padded);

#ifdef CONFIG_NVM_PBLK_DEBUG
	atomic_long_add(rqd->nr_ppas, &pblk->sync_writes);
#endif

	ret = pblk_rb_sync_advance(&pblk->rwb, c_ctx->nr_valid);

	bio_put(rqd->bio);
	pblk_free_rqd(pblk, rqd, PBLK_WRITE);

	return ret;
}

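/* Complete a request that was held back on pblk->compl_list because it
 * finished out of order with respect to the write buffer.
 */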
static unsigned long pblk_end_queued_w_bio(struct pblk *pblk,
					   struct nvm_rq *rqd,
					   struct pblk_c_ctx *c_ctx)
{
	list_del(&c_ctx->list);
	return pblk_end_w_bio(pblk, rqd, c_ctx);
}

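/* Requests must be completed in the order they sit in the write buffer.
 * Complete this request if it is next in line; otherwise park it on
 * pblk->compl_list. On completion, also drain any queued requests that
 * have become contiguous with the sync position.
 */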
static void pblk_complete_write(struct pblk *pblk, struct nvm_rq *rqd,
				struct pblk_c_ctx *c_ctx)
{
	struct pblk_c_ctx *c, *r;
	unsigned long flags;
	unsigned long pos;

#ifdef CONFIG_NVM_PBLK_DEBUG
	atomic_long_sub(c_ctx->nr_valid, &pblk->inflight_writes);
#endif
	pblk_up_rq(pblk, c_ctx->lun_bitmap);

	pos = pblk_rb_sync_init(&pblk->rwb, &flags);
	if (pos == c_ctx->sentry) {
		pos = pblk_end_w_bio(pblk, rqd, c_ctx);

retry:
		list_for_each_entry_safe(c, r, &pblk->compl_list, list) {
			rqd = nvm_rq_from_c_ctx(c);
			if (c->sentry == pos) {
				pos = pblk_end_queued_w_bio(pblk, rqd, c);
				goto retry;
			}
		}
	} else {
		WARN_ON(nvm_rq_from_c_ctx(c_ctx) != rqd);
		list_add_tail(&c_ctx->list, &pblk->compl_list);
	}
	pblk_rb_sync_end(&pblk->rwb, &flags);
}

/* Map remaining sectors in chunk, starting from ppa */
static void pblk_map_remaining(struct pblk *pblk, struct ppa_addr *ppa)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	struct pblk_line *line;
	struct ppa_addr map_ppa = *ppa;
	u64 paddr;
	int done = 0;

	line = pblk_ppa_to_line(pblk, *ppa);
	spin_lock(&line->lock);

	while (!done) {
		paddr = pblk_dev_ppa_to_line_addr(pblk, map_ppa);

		if (!test_and_set_bit(paddr, line->map_bitmap))
			line->left_msecs--;

		if (!test_and_set_bit(paddr, line->invalid_bitmap))
			le32_add_cpu(line->vsc, -1);

		if (geo->version == NVM_OCSSD_SPEC_12) {
			map_ppa.ppa++;
			if (map_ppa.g.pg == geo->num_pg)
				done = 1;
		} else {
			map_ppa.m.sec++;
			if (map_ppa.m.sec == geo->clba)
				done = 1;
		}
	}

	line->w_err_gc->has_write_err = 1;
	spin_unlock(&line->lock);
}

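/* Make the entries of a failed write submittable again: invalidate the
 * lba of any entry that has since been overwritten and drop the line
 * references taken by the original mapping.
 */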
static void pblk_prepare_resubmit(struct pblk *pblk, unsigned int sentry,
				  unsigned int nr_entries)
{
	struct pblk_rb *rb = &pblk->rwb;
	struct pblk_rb_entry *entry;
	struct pblk_line *line;
	struct pblk_w_ctx *w_ctx;
	struct ppa_addr ppa_l2p;
	int flags;
	unsigned int pos, i;

	spin_lock(&pblk->trans_lock);
	pos = sentry;
	for (i = 0; i < nr_entries; i++) {
		entry = &rb->entries[pos];
		w_ctx = &entry->w_ctx;

		/* Check if the lba has been overwritten */
		ppa_l2p = pblk_trans_map_get(pblk, w_ctx->lba);
		if (!pblk_ppa_comp(ppa_l2p, entry->cacheline))
			w_ctx->lba = ADDR_EMPTY;

		/* Mark the entry as submittable again */
		flags = READ_ONCE(w_ctx->flags);
		flags |= PBLK_WRITTEN_DATA;
		/* Release flags on write context. Protect from writes */
		smp_store_release(&w_ctx->flags, flags);

		/* Decrease the reference count on the line, as we will
		 * re-map these entries
		 */
		line = pblk_ppa_to_line(pblk, w_ctx->ppa);
		kref_put(&line->ref, pblk_line_put);

		pos = (pos + 1) & (rb->nr_entries - 1);
	}
	spin_unlock(&pblk->trans_lock);
}

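/* Park the entries of a failed write on pblk->resubmit_list; the write
 * thread picks them up before taking in new data.
 */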
static void pblk_queue_resubmit(struct pblk *pblk, struct pblk_c_ctx *c_ctx)
{
	struct pblk_c_ctx *r_ctx;

	r_ctx = kzalloc(sizeof(struct pblk_c_ctx), GFP_KERNEL);
	if (!r_ctx)
		return;

	r_ctx->lun_bitmap = NULL;
	r_ctx->sentry = c_ctx->sentry;
	r_ctx->nr_valid = c_ctx->nr_valid;
	r_ctx->nr_padded = c_ctx->nr_padded;

	spin_lock(&pblk->resubmit_lock);
	list_add_tail(&r_ctx->list, &pblk->resubmit_list);
	spin_unlock(&pblk->resubmit_lock);

#ifdef CONFIG_NVM_PBLK_DEBUG
	atomic_long_add(c_ctx->nr_valid, &pblk->recov_writes);
#endif
}

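/* Recovery work for a failed write: log the error, map out the rest of
 * the failed chunk, queue the entries for resubmission and release the
 * resources held by the original request.
 */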
static void pblk_submit_rec(struct work_struct *work)
{
	struct pblk_rec_ctx *recovery =
			container_of(work, struct pblk_rec_ctx, ws_rec);
	struct pblk *pblk = recovery->pblk;
	struct nvm_rq *rqd = recovery->rqd;
	struct pblk_c_ctx *c_ctx = nvm_rq_to_pdu(rqd);
	struct ppa_addr *ppa_list = nvm_rq_to_ppa_list(rqd);

	pblk_log_write_err(pblk, rqd);

	pblk_map_remaining(pblk, ppa_list);
	pblk_queue_resubmit(pblk, c_ctx);

	pblk_up_rq(pblk, c_ctx->lun_bitmap);
	if (c_ctx->nr_padded)
		pblk_bio_free_pages(pblk, rqd->bio, c_ctx->nr_valid,
							c_ctx->nr_padded);
	bio_put(rqd->bio);
	pblk_free_rqd(pblk, rqd, PBLK_WRITE);
	mempool_free(recovery, &pblk->rec_pool);

	atomic_dec(&pblk->inflight_io);
}

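/* A write to the device failed. Defer the actual recovery to a context
 * that can sleep (pblk->close_wq).
 */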
static void pblk_end_w_fail(struct pblk *pblk, struct nvm_rq *rqd)
{
	struct pblk_rec_ctx *recovery;

	recovery = mempool_alloc(&pblk->rec_pool, GFP_ATOMIC);
	if (!recovery) {
		pblk_err(pblk, "could not allocate recovery work\n");
		return;
	}

	recovery->pblk = pblk;
	recovery->rqd = rqd;

	INIT_WORK(&recovery->ws_rec, pblk_submit_rec);
	queue_work(pblk->close_wq, &recovery->ws_rec);
}

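/* Completion path for user data writes */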
static void pblk_end_io_write(struct nvm_rq *rqd)
{
	struct pblk *pblk = rqd->private;
	struct pblk_c_ctx *c_ctx = nvm_rq_to_pdu(rqd);

	if (rqd->error) {
		pblk_end_w_fail(pblk, rqd);
		return;
	}
#ifdef CONFIG_NVM_PBLK_DEBUG
	else
		WARN_ONCE(rqd->bio->bi_status, "pblk: corrupted write error\n");
#endif

	pblk_complete_write(pblk, rqd, c_ctx);
	atomic_dec(&pblk->inflight_io);
}

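/* Completion path for emeta writes. Once the whole emeta region has
 * been persisted, schedule the line for closure.
 */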
static void pblk_end_io_write_meta(struct nvm_rq *rqd)
{
	struct pblk *pblk = rqd->private;
	struct pblk_g_ctx *m_ctx = nvm_rq_to_pdu(rqd);
	struct pblk_line *line = m_ctx->private;
	struct pblk_emeta *emeta = line->emeta;
	struct ppa_addr *ppa_list = nvm_rq_to_ppa_list(rqd);
	int sync;

	pblk_up_page(pblk, ppa_list, rqd->nr_ppas);

	if (rqd->error) {
		pblk_log_write_err(pblk, rqd);
		pblk_err(pblk, "metadata I/O failed. Line %d\n", line->id);
		line->w_err_gc->has_write_err = 1;
	}

	sync = atomic_add_return(rqd->nr_ppas, &emeta->sync);
	if (sync == emeta->nr_entries)
		pblk_gen_run_ws(pblk, line, NULL, pblk_line_close_ws,
						GFP_ATOMIC, pblk->close_wq);

	pblk_free_rqd(pblk, rqd, PBLK_WRITE_INT);

	atomic_dec(&pblk->inflight_io);
}

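/* Common setup for data and metadata write requests: write opcode,
 * completion callback and DMA-able metadata and PPA lists.
 */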
static int pblk_alloc_w_rq(struct pblk *pblk, struct nvm_rq *rqd,
			   unsigned int nr_secs,
			   nvm_end_io_fn(*end_io))
{
	struct nvm_tgt_dev *dev = pblk->dev;

	/* Setup write request */
	rqd->opcode = NVM_OP_PWRITE;
	rqd->nr_ppas = nr_secs;
	rqd->is_seq = 1;
	rqd->private = pblk;
	rqd->end_io = end_io;

	rqd->meta_list = nvm_dev_dma_alloc(dev->parent, GFP_KERNEL,
							&rqd->dma_meta_list);
	if (!rqd->meta_list)
		return -ENOMEM;

	rqd->ppa_list = rqd->meta_list + pblk_dma_meta_size;
	rqd->dma_ppa_list = rqd->dma_meta_list + pblk_dma_meta_size;

	return 0;
}

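/* Map the sectors of a data write request to physical addresses,
 * recording the LUNs touched in c_ctx->lun_bitmap. If the line due to
 * be erased next still has pending blocks, map through
 * pblk_map_erase_rq() so an erase can be scheduled alongside the write.
 */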
static int pblk_setup_w_rq(struct pblk *pblk, struct nvm_rq *rqd,
			   struct ppa_addr *erase_ppa)
{
	struct pblk_line_meta *lm = &pblk->lm;
	struct pblk_line *e_line = pblk_line_get_erase(pblk);
	struct pblk_c_ctx *c_ctx = nvm_rq_to_pdu(rqd);
	unsigned int valid = c_ctx->nr_valid;
	unsigned int padded = c_ctx->nr_padded;
	unsigned int nr_secs = valid + padded;
	unsigned long *lun_bitmap;
	int ret;

	lun_bitmap = kzalloc(lm->lun_bitmap_len, GFP_KERNEL);
	if (!lun_bitmap)
		return -ENOMEM;
	c_ctx->lun_bitmap = lun_bitmap;

	ret = pblk_alloc_w_rq(pblk, rqd, nr_secs, pblk_end_io_write);
	if (ret) {
		kfree(lun_bitmap);
		return ret;
	}

	if (likely(!e_line || !atomic_read(&e_line->left_eblks)))
		pblk_map_rq(pblk, rqd, c_ctx->sentry, lun_bitmap, valid, 0);
	else
		pblk_map_erase_rq(pblk, rqd, c_ctx->sentry, lun_bitmap,
							valid, erase_ppa);

	return 0;
}

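/* Calculate how many sectors to submit in one request, honoring
 * outstanding flush points and the device's write size constraints.
 */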
static int pblk_calc_secs_to_sync(struct pblk *pblk, unsigned int secs_avail,
				  unsigned int secs_to_flush)
{
	int secs_to_sync;

	secs_to_sync = pblk_calc_secs(pblk, secs_avail, secs_to_flush);

#ifdef CONFIG_NVM_PBLK_DEBUG
	if ((!secs_to_sync && secs_to_flush)
			|| (secs_to_sync < 0)
			|| (secs_to_sync > secs_avail && !secs_to_flush)) {
		pblk_err(pblk, "bad sector calculation (a:%d,s:%d,f:%d)\n",
				secs_avail, secs_to_sync, secs_to_flush);
	}
#endif

	return secs_to_sync;
}

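/* Write the next chunk of the line's end metadata (emeta) to the media */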
int pblk_submit_meta_io(struct pblk *pblk, struct pblk_line *meta_line)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	struct pblk_line_meta *lm = &pblk->lm;
	struct pblk_emeta *emeta = meta_line->emeta;
	struct ppa_addr *ppa_list;
	struct pblk_g_ctx *m_ctx;
	struct bio *bio;
	struct nvm_rq *rqd;
	void *data;
	u64 paddr;
	int rq_ppas = pblk->min_write_pgs;
	int id = meta_line->id;
	int rq_len;
	int i, j;
	int ret;

	rqd = pblk_alloc_rqd(pblk, PBLK_WRITE_INT);

	m_ctx = nvm_rq_to_pdu(rqd);
	m_ctx->private = meta_line;

	rq_len = rq_ppas * geo->csecs;
	data = ((void *)emeta->buf) + emeta->mem;

	bio = pblk_bio_map_addr(pblk, data, rq_ppas, rq_len,
					l_mg->emeta_alloc_type, GFP_KERNEL);
	if (IS_ERR(bio)) {
		pblk_err(pblk, "failed to map emeta io\n");
		ret = PTR_ERR(bio);
		goto fail_free_rqd;
	}
	bio->bi_iter.bi_sector = 0; /* internal bio */
	bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
	rqd->bio = bio;

	ret = pblk_alloc_w_rq(pblk, rqd, rq_ppas, pblk_end_io_write_meta);
	if (ret)
		goto fail_free_bio;

	ppa_list = nvm_rq_to_ppa_list(rqd);
	for (i = 0; i < rqd->nr_ppas; ) {
		spin_lock(&meta_line->lock);
		paddr = __pblk_alloc_page(pblk, meta_line, rq_ppas);
		spin_unlock(&meta_line->lock);
		for (j = 0; j < rq_ppas; j++, i++, paddr++)
			ppa_list[i] = addr_to_gen_ppa(pblk, paddr, id);
	}

	spin_lock(&l_mg->close_lock);
	emeta->mem += rq_len;
	if (emeta->mem >= lm->emeta_len[0])
		list_del(&meta_line->list);
	spin_unlock(&l_mg->close_lock);

	pblk_down_page(pblk, ppa_list, rqd->nr_ppas);

	ret = pblk_submit_io(pblk, rqd);
	if (ret) {
		pblk_err(pblk, "emeta I/O submission failed: %d\n", ret);
		goto fail_rollback;
	}

	return NVM_IO_OK;

fail_rollback:
	pblk_up_page(pblk, ppa_list, rqd->nr_ppas);
	spin_lock(&l_mg->close_lock);
	pblk_dealloc_page(pblk, meta_line, rq_ppas);
	/* Put the line back on the emeta list so its write can be retried */
	list_add(&meta_line->list, &l_mg->emeta_list);
	spin_unlock(&l_mg->close_lock);
fail_free_bio:
	bio_put(bio);
fail_free_rqd:
	pblk_free_rqd(pblk, rqd, PBLK_WRITE_INT);
	return ret;
}

static inline bool pblk_valid_meta_ppa(struct pblk *pblk,
				       struct pblk_line *meta_line,
				       struct nvm_rq *data_rqd)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	struct pblk_c_ctx *data_c_ctx = nvm_rq_to_pdu(data_rqd);
	struct pblk_line *data_line = pblk_line_get_data(pblk);
	struct ppa_addr ppa, ppa_opt;
	u64 paddr;
	int pos_opt;

	/* Schedule a metadata I/O that is half the distance from the data I/O
	 * with regards to the number of LUNs forming the pblk instance. This
	 * balances LUN conflicts across every I/O.
	 *
	 * When the LUN configuration changes (e.g., due to GC), this distance
	 * can align, which would result in metadata and data I/Os colliding.
	 * In this case, modify the distance so it is no longer optimal, but
	 * moves the optimal point in the right direction.
	 */
	paddr = pblk_lookup_page(pblk, meta_line);
	ppa = addr_to_gen_ppa(pblk, paddr, 0);
	ppa_opt = addr_to_gen_ppa(pblk, paddr + data_line->meta_distance, 0);
	pos_opt = pblk_ppa_to_pos(geo, ppa_opt);

	if (test_bit(pos_opt, data_c_ctx->lun_bitmap) ||
				test_bit(pos_opt, data_line->blk_bitmap))
		return true;

	if (unlikely(pblk_ppa_comp(ppa_opt, ppa)))
		data_line->meta_distance--;

	return false;
}

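/* Return the line whose emeta should be written together with this data
 * request, or NULL if there is no emeta pending or its placement would
 * collide with the data I/O.
 */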
static struct pblk_line *pblk_should_submit_meta_io(struct pblk *pblk,
						    struct nvm_rq *data_rqd)
{
	struct pblk_line_meta *lm = &pblk->lm;
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	struct pblk_line *meta_line;

	spin_lock(&l_mg->close_lock);
	if (list_empty(&l_mg->emeta_list)) {
		spin_unlock(&l_mg->close_lock);
		return NULL;
	}
	meta_line = list_first_entry(&l_mg->emeta_list, struct pblk_line, list);
	if (meta_line->emeta->mem >= lm->emeta_len[0]) {
		spin_unlock(&l_mg->close_lock);
		return NULL;
	}
	spin_unlock(&l_mg->close_lock);

	if (!pblk_valid_meta_ppa(pblk, meta_line, data_rqd))
		return NULL;

	return meta_line;
}

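/* Submit one data write and, with it if needed, an erase on the next
 * data line and an emeta write for a line being closed.
 */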
static int pblk_submit_io_set(struct pblk *pblk, struct nvm_rq *rqd)
{
	struct ppa_addr erase_ppa;
	struct pblk_line *meta_line;
	int err;

	pblk_ppa_set_empty(&erase_ppa);

	/* Assign lbas to ppas and populate request structure */
	err = pblk_setup_w_rq(pblk, rqd, &erase_ppa);
	if (err) {
		pblk_err(pblk, "could not setup write request: %d\n", err);
		return NVM_IO_ERR;
	}

	meta_line = pblk_should_submit_meta_io(pblk, rqd);

	/* Submit data write for current data line */
	err = pblk_submit_io(pblk, rqd);
	if (err) {
		pblk_err(pblk, "data I/O submission failed: %d\n", err);
		return NVM_IO_ERR;
	}

	if (!pblk_ppa_empty(erase_ppa)) {
		/* Submit erase for next data line */
		if (pblk_blk_erase_async(pblk, erase_ppa)) {
			struct pblk_line *e_line = pblk_line_get_erase(pblk);
			struct nvm_tgt_dev *dev = pblk->dev;
			struct nvm_geo *geo = &dev->geo;
			int bit;

			atomic_inc(&e_line->left_eblks);
			bit = pblk_ppa_to_pos(geo, erase_ppa);
			WARN_ON(!test_and_clear_bit(bit, e_line->erase_bitmap));
		}
	}

	if (meta_line) {
		/* Submit metadata write for previous data line */
		err = pblk_submit_meta_io(pblk, meta_line);
		if (err) {
			pblk_err(pblk, "metadata I/O submission failed: %d\n",
					err);
			return NVM_IO_ERR;
		}
	}

	return NVM_IO_OK;
}

static void pblk_free_write_rqd(struct pblk *pblk, struct nvm_rq *rqd)
{
	struct pblk_c_ctx *c_ctx = nvm_rq_to_pdu(rqd);
	struct bio *bio = rqd->bio;

	if (c_ctx->nr_padded)
		pblk_bio_free_pages(pblk, bio, c_ctx->nr_valid,
							c_ctx->nr_padded);
}

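/* Form and submit one write request. Entries queued for resubmission
 * after a failed write take priority over new data in the write buffer.
 * Returns 0 if a request was submitted, 1 otherwise.
 */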
static int pblk_submit_write(struct pblk *pblk)
{
	struct bio *bio;
	struct nvm_rq *rqd;
	unsigned int secs_avail, secs_to_sync, secs_to_com;
	unsigned int secs_to_flush;
	unsigned long pos;
	unsigned int resubmit;

	spin_lock(&pblk->resubmit_lock);
	resubmit = !list_empty(&pblk->resubmit_list);
	spin_unlock(&pblk->resubmit_lock);

	/* Resubmit failed writes first */
	if (resubmit) {
		struct pblk_c_ctx *r_ctx;

		spin_lock(&pblk->resubmit_lock);
		r_ctx = list_first_entry(&pblk->resubmit_list,
					struct pblk_c_ctx, list);
		list_del(&r_ctx->list);
		spin_unlock(&pblk->resubmit_lock);

		secs_avail = r_ctx->nr_valid;
		pos = r_ctx->sentry;

		pblk_prepare_resubmit(pblk, pos, secs_avail);
		secs_to_sync = pblk_calc_secs_to_sync(pblk, secs_avail,
				secs_avail);

		kfree(r_ctx);
	} else {
		/* If there are no sectors in the cache,
		 * flushes (bios without data) will be cleared on
		 * the cache threads
		 */
		secs_avail = pblk_rb_read_count(&pblk->rwb);
		if (!secs_avail)
			return 1;

		secs_to_flush = pblk_rb_flush_point_count(&pblk->rwb);
		if (!secs_to_flush && secs_avail < pblk->min_write_pgs)
			return 1;

		secs_to_sync = pblk_calc_secs_to_sync(pblk, secs_avail,
					secs_to_flush);
		if (secs_to_sync > pblk->max_write_pgs) {
			pblk_err(pblk, "bad buffer sync calculation\n");
			return 1;
		}

		secs_to_com = (secs_to_sync > secs_avail) ?
			secs_avail : secs_to_sync;
		pos = pblk_rb_read_commit(&pblk->rwb, secs_to_com);
	}

	bio = bio_alloc(GFP_KERNEL, secs_to_sync);

	bio->bi_iter.bi_sector = 0; /* internal bio */
	bio_set_op_attrs(bio, REQ_OP_WRITE, 0);

	rqd = pblk_alloc_rqd(pblk, PBLK_WRITE);
	rqd->bio = bio;

	if (pblk_rb_read_to_bio(&pblk->rwb, rqd, pos, secs_to_sync,
								secs_avail)) {
		pblk_err(pblk, "corrupted write bio\n");
		goto fail_put_bio;
	}

	if (pblk_submit_io_set(pblk, rqd))
		goto fail_free_bio;

#ifdef CONFIG_NVM_PBLK_DEBUG
	atomic_long_add(secs_to_sync, &pblk->sub_writes);
#endif

	return 0;

fail_free_bio:
	pblk_free_write_rqd(pblk, rqd);
fail_put_bio:
	bio_put(bio);
	pblk_free_rqd(pblk, rqd, PBLK_WRITE);

	return 1;
}

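/* Write thread: moves data from the write buffer to the media, sleeping
 * whenever there is nothing to submit.
 */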
int pblk_write_ts(void *data)
{
	struct pblk *pblk = data;

	while (!kthread_should_stop()) {
		if (!pblk_submit_write(pblk))
			continue;
		set_current_state(TASK_INTERRUPTIBLE);
		io_schedule();
	}

	return 0;
}