// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2016 CNEX Labs
 * Initial release: Javier Gonzalez <javier@cnexlabs.com>
 *                  Matias Bjorling <matias@cnexlabs.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * pblk-write.c - pblk's write path from write buffer to media
 */

#include "pblk.h"
#include "pblk-trace.h"

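/*
 * Complete a write request on the buffer side: end the original user bios
 * attached to each ring buffer entry, free any pages that were added as
 * padding, and advance the ring buffer's sync pointer past the request.
 */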
static unsigned long pblk_end_w_bio(struct pblk *pblk, struct nvm_rq *rqd,
				    struct pblk_c_ctx *c_ctx)
{
	struct bio *original_bio;
	struct pblk_rb *rwb = &pblk->rwb;
	unsigned long ret;
	int i;

	for (i = 0; i < c_ctx->nr_valid; i++) {
		struct pblk_w_ctx *w_ctx;
		int pos = c_ctx->sentry + i;
		int flags;

		w_ctx = pblk_rb_w_ctx(rwb, pos);
		flags = READ_ONCE(w_ctx->flags);

		if (flags & PBLK_FLUSH_ENTRY) {
			flags &= ~PBLK_FLUSH_ENTRY;
			/* Release flags on context. Protect from writes */
			smp_store_release(&w_ctx->flags, flags);

#ifdef CONFIG_NVM_PBLK_DEBUG
			atomic_dec(&rwb->inflight_flush_point);
#endif
		}

		while ((original_bio = bio_list_pop(&w_ctx->bios)))
			bio_endio(original_bio);
	}

	if (c_ctx->nr_padded)
		pblk_bio_free_pages(pblk, rqd->bio, c_ctx->nr_valid,
							c_ctx->nr_padded);

#ifdef CONFIG_NVM_PBLK_DEBUG
	atomic_long_add(rqd->nr_ppas, &pblk->sync_writes);
#endif

	ret = pblk_rb_sync_advance(&pblk->rwb, c_ctx->nr_valid);

	bio_put(rqd->bio);
	pblk_free_rqd(pblk, rqd, PBLK_WRITE);

	return ret;
}

static unsigned long pblk_end_queued_w_bio(struct pblk *pblk,
					   struct nvm_rq *rqd,
					   struct pblk_c_ctx *c_ctx)
{
	list_del(&c_ctx->list);
	return pblk_end_w_bio(pblk, rqd, c_ctx);
}

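/*
 * Writes must be completed in ring buffer order. If this request does not
 * start at the current sync position, park it on pblk->compl_list;
 * otherwise complete it and drain any queued completions that have become
 * contiguous with the sync pointer.
 */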
static void pblk_complete_write(struct pblk *pblk, struct nvm_rq *rqd,
				struct pblk_c_ctx *c_ctx)
{
	struct pblk_c_ctx *c, *r;
	unsigned long flags;
	unsigned long pos;

#ifdef CONFIG_NVM_PBLK_DEBUG
	atomic_long_sub(c_ctx->nr_valid, &pblk->inflight_writes);
#endif
	pblk_up_rq(pblk, c_ctx->lun_bitmap);

	pos = pblk_rb_sync_init(&pblk->rwb, &flags);
	if (pos == c_ctx->sentry) {
		pos = pblk_end_w_bio(pblk, rqd, c_ctx);

retry:
		list_for_each_entry_safe(c, r, &pblk->compl_list, list) {
			rqd = nvm_rq_from_c_ctx(c);
			if (c->sentry == pos) {
				pos = pblk_end_queued_w_bio(pblk, rqd, c);
				goto retry;
			}
		}
	} else {
		WARN_ON(nvm_rq_from_c_ctx(c_ctx) != rqd);
		list_add_tail(&c_ctx->list, &pblk->compl_list);
	}
	pblk_rb_sync_end(&pblk->rwb, &flags);
}

/* Map remaining sectors in chunk, starting from ppa */
static void pblk_map_remaining(struct pblk *pblk, struct ppa_addr *ppa)
{
	struct pblk_line *line;
	struct ppa_addr map_ppa = *ppa;
	u64 paddr;
	int done = 0;

	line = pblk_ppa_to_line(pblk, *ppa);
	spin_lock(&line->lock);

	while (!done) {
		paddr = pblk_dev_ppa_to_line_addr(pblk, map_ppa);

		if (!test_and_set_bit(paddr, line->map_bitmap))
			line->left_msecs--;

		if (!test_and_set_bit(paddr, line->invalid_bitmap))
			le32_add_cpu(line->vsc, -1);

		done = nvm_next_ppa_in_chk(pblk->dev, &map_ppa);
	}

	line->w_err_gc->has_write_err = 1;
	spin_unlock(&line->lock);
}

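/*
 * Prepare failed entries for resubmission: invalidate the lba of entries
 * that have since been overwritten in the L2P table, mark each entry as
 * submittable again, and drop the line reference taken when the entry was
 * first mapped.
 */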
static void pblk_prepare_resubmit(struct pblk *pblk, unsigned int sentry,
				  unsigned int nr_entries)
{
	struct pblk_rb *rb = &pblk->rwb;
	struct pblk_rb_entry *entry;
	struct pblk_line *line;
	struct pblk_w_ctx *w_ctx;
	struct ppa_addr ppa_l2p;
	int flags;
	unsigned int i;

	spin_lock(&pblk->trans_lock);
	for (i = 0; i < nr_entries; i++) {
		entry = &rb->entries[pblk_rb_ptr_wrap(rb, sentry, i)];
		w_ctx = &entry->w_ctx;

		/* Check if the lba has been overwritten */
		ppa_l2p = pblk_trans_map_get(pblk, w_ctx->lba);
		if (!pblk_ppa_comp(ppa_l2p, entry->cacheline))
			w_ctx->lba = ADDR_EMPTY;

		/* Mark up the entry as submittable again */
		flags = READ_ONCE(w_ctx->flags);
		flags |= PBLK_WRITTEN_DATA;
		/* Release flags on write context. Protect from writes */
		smp_store_release(&w_ctx->flags, flags);

		/* Decrease the reference count to the line as we will
		 * re-map these entries
		 */
		line = pblk_ppa_to_line(pblk, w_ctx->ppa);
		kref_put(&line->ref, pblk_line_put);
	}
	spin_unlock(&pblk->trans_lock);
}

static void pblk_queue_resubmit(struct pblk *pblk, struct pblk_c_ctx *c_ctx)
{
	struct pblk_c_ctx *r_ctx;

	r_ctx = kzalloc(sizeof(struct pblk_c_ctx), GFP_KERNEL);
	if (!r_ctx)
		return;

	r_ctx->lun_bitmap = NULL;
	r_ctx->sentry = c_ctx->sentry;
	r_ctx->nr_valid = c_ctx->nr_valid;
	r_ctx->nr_padded = c_ctx->nr_padded;

	spin_lock(&pblk->resubmit_lock);
	list_add_tail(&r_ctx->list, &pblk->resubmit_list);
	spin_unlock(&pblk->resubmit_lock);

#ifdef CONFIG_NVM_PBLK_DEBUG
	atomic_long_add(c_ctx->nr_valid, &pblk->recov_writes);
#endif
}

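/*
 * Write error recovery worker: mark the sectors of the failed request as
 * mapped and invalid on their line, queue the corresponding ring buffer
 * entries for resubmission, and release the request's resources.
 */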
static void pblk_submit_rec(struct work_struct *work)
{
	struct pblk_rec_ctx *recovery =
			container_of(work, struct pblk_rec_ctx, ws_rec);
	struct pblk *pblk = recovery->pblk;
	struct nvm_rq *rqd = recovery->rqd;
	struct pblk_c_ctx *c_ctx = nvm_rq_to_pdu(rqd);
	struct ppa_addr *ppa_list = nvm_rq_to_ppa_list(rqd);

	pblk_log_write_err(pblk, rqd);

	pblk_map_remaining(pblk, ppa_list);
	pblk_queue_resubmit(pblk, c_ctx);

	pblk_up_rq(pblk, c_ctx->lun_bitmap);
	if (c_ctx->nr_padded)
		pblk_bio_free_pages(pblk, rqd->bio, c_ctx->nr_valid,
							c_ctx->nr_padded);
	bio_put(rqd->bio);
	pblk_free_rqd(pblk, rqd, PBLK_WRITE);
	mempool_free(recovery, &pblk->rec_pool);

	atomic_dec(&pblk->inflight_io);
}

static void pblk_end_w_fail(struct pblk *pblk, struct nvm_rq *rqd)
{
	struct pblk_rec_ctx *recovery;

	recovery = mempool_alloc(&pblk->rec_pool, GFP_ATOMIC);
	if (!recovery) {
		pblk_err(pblk, "could not allocate recovery work\n");
		return;
	}

	recovery->pblk = pblk;
	recovery->rqd = rqd;

	INIT_WORK(&recovery->ws_rec, pblk_submit_rec);
	queue_work(pblk->close_wq, &recovery->ws_rec);
}

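/*
 * Completion callback for user data writes. Failed requests are handed off
 * to the recovery worker; successful ones are completed in buffer order.
 */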
static void pblk_end_io_write(struct nvm_rq *rqd)
{
	struct pblk *pblk = rqd->private;
	struct pblk_c_ctx *c_ctx = nvm_rq_to_pdu(rqd);

	if (rqd->error) {
		pblk_end_w_fail(pblk, rqd);
		return;
	} else {
		if (trace_pblk_chunk_state_enabled())
			pblk_check_chunk_state_update(pblk, rqd);
#ifdef CONFIG_NVM_PBLK_DEBUG
		WARN_ONCE(rqd->bio->bi_status, "pblk: corrupted write error\n");
#endif
	}

	pblk_complete_write(pblk, rqd, c_ctx);
	atomic_dec(&pblk->inflight_io);
}

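/*
 * Completion callback for emeta (line end metadata) writes. Once all emeta
 * sectors of the line have synced, line close is scheduled on close_wq.
 */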
static void pblk_end_io_write_meta(struct nvm_rq *rqd)
{
	struct pblk *pblk = rqd->private;
	struct pblk_g_ctx *m_ctx = nvm_rq_to_pdu(rqd);
	struct pblk_line *line = m_ctx->private;
	struct pblk_emeta *emeta = line->emeta;
	struct ppa_addr *ppa_list = nvm_rq_to_ppa_list(rqd);
	int sync;

	pblk_up_chunk(pblk, ppa_list[0]);

	if (rqd->error) {
		pblk_log_write_err(pblk, rqd);
		pblk_err(pblk, "metadata I/O failed. Line %d\n", line->id);
		line->w_err_gc->has_write_err = 1;
	} else {
		if (trace_pblk_chunk_state_enabled())
			pblk_check_chunk_state_update(pblk, rqd);
	}

	sync = atomic_add_return(rqd->nr_ppas, &emeta->sync);
	if (sync == emeta->nr_entries)
		pblk_gen_run_ws(pblk, line, NULL, pblk_line_close_ws,
						GFP_ATOMIC, pblk->close_wq);

	pblk_free_rqd(pblk, rqd, PBLK_WRITE_INT);

	atomic_dec(&pblk->inflight_io);
}

static int pblk_alloc_w_rq(struct pblk *pblk, struct nvm_rq *rqd,
			   unsigned int nr_secs, nvm_end_io_fn(*end_io))
{
	/* Setup write request */
	rqd->opcode = NVM_OP_PWRITE;
	rqd->nr_ppas = nr_secs;
	rqd->is_seq = 1;
	rqd->private = pblk;
	rqd->end_io = end_io;

	return pblk_alloc_rqd_meta(pblk, rqd);
}

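/*
 * Populate a data write request: allocate the bitmap tracking the LUNs this
 * request touches and map the buffered sectors to physical addresses. While
 * the next line still has blocks left to erase, map through
 * pblk_map_erase_rq() so an erase can be scheduled alongside the write.
 */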
static int pblk_setup_w_rq(struct pblk *pblk, struct nvm_rq *rqd,
			   struct ppa_addr *erase_ppa)
{
	struct pblk_line_meta *lm = &pblk->lm;
	struct pblk_line *e_line = pblk_line_get_erase(pblk);
	struct pblk_c_ctx *c_ctx = nvm_rq_to_pdu(rqd);
	unsigned int valid = c_ctx->nr_valid;
	unsigned int padded = c_ctx->nr_padded;
	unsigned int nr_secs = valid + padded;
	unsigned long *lun_bitmap;
	int ret;

	lun_bitmap = kzalloc(lm->lun_bitmap_len, GFP_KERNEL);
	if (!lun_bitmap)
		return -ENOMEM;
	c_ctx->lun_bitmap = lun_bitmap;

	ret = pblk_alloc_w_rq(pblk, rqd, nr_secs, pblk_end_io_write);
	if (ret) {
		kfree(lun_bitmap);
		return ret;
	}

	if (likely(!e_line || !atomic_read(&e_line->left_eblks)))
		pblk_map_rq(pblk, rqd, c_ctx->sentry, lun_bitmap, valid, 0);
	else
		pblk_map_erase_rq(pblk, rqd, c_ctx->sentry, lun_bitmap,
							valid, erase_ppa);

	return 0;
}

static int pblk_calc_secs_to_sync(struct pblk *pblk, unsigned int secs_avail,
				  unsigned int secs_to_flush)
{
	int secs_to_sync;

	secs_to_sync = pblk_calc_secs(pblk, secs_avail, secs_to_flush);

#ifdef CONFIG_NVM_PBLK_DEBUG
	if ((!secs_to_sync && secs_to_flush)
			|| (secs_to_sync < 0)
			|| (secs_to_sync > secs_avail && !secs_to_flush)) {
		pblk_err(pblk, "bad sector calculation (a:%d,s:%d,f:%d)\n",
				secs_avail, secs_to_sync, secs_to_flush);
	}
#endif

	return secs_to_sync;
}

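/*
 * Write out the next chunk of the line's emeta buffer: map it into a bio,
 * allocate physical pages on the meta line, and submit the request. On
 * submission failure, the page allocation is rolled back.
 */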
int pblk_submit_meta_io(struct pblk *pblk, struct pblk_line *meta_line)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	struct pblk_line_meta *lm = &pblk->lm;
	struct pblk_emeta *emeta = meta_line->emeta;
	struct ppa_addr *ppa_list;
	struct pblk_g_ctx *m_ctx;
	struct bio *bio;
	struct nvm_rq *rqd;
	void *data;
	u64 paddr;
	int rq_ppas = pblk->min_write_pgs;
	int id = meta_line->id;
	int rq_len;
	int i, j;
	int ret;

	rqd = pblk_alloc_rqd(pblk, PBLK_WRITE_INT);

	m_ctx = nvm_rq_to_pdu(rqd);
	m_ctx->private = meta_line;

	rq_len = rq_ppas * geo->csecs;
	data = ((void *)emeta->buf) + emeta->mem;

	bio = pblk_bio_map_addr(pblk, data, rq_ppas, rq_len,
					l_mg->emeta_alloc_type, GFP_KERNEL);
	if (IS_ERR(bio)) {
		pblk_err(pblk, "failed to map emeta io\n");
		ret = PTR_ERR(bio);
		goto fail_free_rqd;
	}
	bio->bi_iter.bi_sector = 0; /* internal bio */
	bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
	rqd->bio = bio;

	ret = pblk_alloc_w_rq(pblk, rqd, rq_ppas, pblk_end_io_write_meta);
	if (ret)
		goto fail_free_bio;

	ppa_list = nvm_rq_to_ppa_list(rqd);
	for (i = 0; i < rqd->nr_ppas; ) {
		spin_lock(&meta_line->lock);
		paddr = __pblk_alloc_page(pblk, meta_line, rq_ppas);
		spin_unlock(&meta_line->lock);
		for (j = 0; j < rq_ppas; j++, i++, paddr++)
			ppa_list[i] = addr_to_gen_ppa(pblk, paddr, id);
	}

	spin_lock(&l_mg->close_lock);
	emeta->mem += rq_len;
	if (emeta->mem >= lm->emeta_len[0])
		list_del(&meta_line->list);
	spin_unlock(&l_mg->close_lock);

	pblk_down_chunk(pblk, ppa_list[0]);

	ret = pblk_submit_io(pblk, rqd);
	if (ret) {
		pblk_err(pblk, "emeta I/O submission failed: %d\n", ret);
		goto fail_rollback;
	}

	return NVM_IO_OK;

fail_rollback:
	pblk_up_chunk(pblk, ppa_list[0]);
	spin_lock(&l_mg->close_lock);
	pblk_dealloc_page(pblk, meta_line, rq_ppas);
	list_add(&meta_line->list, &meta_line->list);
	spin_unlock(&l_mg->close_lock);
fail_free_bio:
	bio_put(bio);
fail_free_rqd:
	pblk_free_rqd(pblk, rqd, PBLK_WRITE_INT);
	return ret;
}

static inline bool pblk_valid_meta_ppa(struct pblk *pblk,
				       struct pblk_line *meta_line,
				       struct nvm_rq *data_rqd)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	struct pblk_c_ctx *data_c_ctx = nvm_rq_to_pdu(data_rqd);
	struct pblk_line *data_line = pblk_line_get_data(pblk);
	struct ppa_addr ppa, ppa_opt;
	u64 paddr;
	int pos_opt;

	/* Schedule a metadata I/O that is half the distance from the data I/O
	 * with regard to the number of LUNs forming the pblk instance. This
	 * balances LUN conflicts across every I/O.
	 *
	 * When the LUN configuration changes (e.g., due to GC), this distance
	 * can align, which would result in metadata and data I/Os colliding.
	 * In this case, modify the distance so that it is no longer optimal,
	 * but moves the optimal point in the right direction.
	 */
	paddr = pblk_lookup_page(pblk, meta_line);
	ppa = addr_to_gen_ppa(pblk, paddr, 0);
	ppa_opt = addr_to_gen_ppa(pblk, paddr + data_line->meta_distance, 0);
	pos_opt = pblk_ppa_to_pos(geo, ppa_opt);

	if (test_bit(pos_opt, data_c_ctx->lun_bitmap) ||
				test_bit(pos_opt, data_line->blk_bitmap))
		return true;

	if (unlikely(pblk_ppa_comp(ppa_opt, ppa)))
		data_line->meta_distance--;

	return false;
}

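/*
 * Pick the oldest line with outstanding emeta, unless its next metadata
 * write would collide with the in-flight data I/O on the same LUN.
 */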
static struct pblk_line *pblk_should_submit_meta_io(struct pblk *pblk,
						    struct nvm_rq *data_rqd)
{
	struct pblk_line_meta *lm = &pblk->lm;
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	struct pblk_line *meta_line;

	spin_lock(&l_mg->close_lock);
	if (list_empty(&l_mg->emeta_list)) {
		spin_unlock(&l_mg->close_lock);
		return NULL;
	}
	meta_line = list_first_entry(&l_mg->emeta_list, struct pblk_line, list);
	if (meta_line->emeta->mem >= lm->emeta_len[0]) {
		spin_unlock(&l_mg->close_lock);
		return NULL;
	}
	spin_unlock(&l_mg->close_lock);

	if (!pblk_valid_meta_ppa(pblk, meta_line, data_rqd))
		return NULL;

	return meta_line;
}

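/*
 * Submit one buffered write to the media: the data I/O for the current
 * line, an asynchronous erase for the next line if one is due, and, when
 * appropriate, an emeta write for a previously written line.
 */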
static int pblk_submit_io_set(struct pblk *pblk, struct nvm_rq *rqd)
{
	struct ppa_addr erase_ppa;
	struct pblk_line *meta_line;
	int err;

	pblk_ppa_set_empty(&erase_ppa);

	/* Assign lbas to ppas and populate request structure */
	err = pblk_setup_w_rq(pblk, rqd, &erase_ppa);
	if (err) {
		pblk_err(pblk, "could not setup write request: %d\n", err);
		return NVM_IO_ERR;
	}

	meta_line = pblk_should_submit_meta_io(pblk, rqd);

	/* Submit data write for current data line */
	err = pblk_submit_io(pblk, rqd);
	if (err) {
		pblk_err(pblk, "data I/O submission failed: %d\n", err);
		return NVM_IO_ERR;
	}

	if (!pblk_ppa_empty(erase_ppa)) {
		/* Submit erase for next data line */
		if (pblk_blk_erase_async(pblk, erase_ppa)) {
			struct pblk_line *e_line = pblk_line_get_erase(pblk);
			struct nvm_tgt_dev *dev = pblk->dev;
			struct nvm_geo *geo = &dev->geo;
			int bit;

			atomic_inc(&e_line->left_eblks);
			bit = pblk_ppa_to_pos(geo, erase_ppa);
			WARN_ON(!test_and_clear_bit(bit, e_line->erase_bitmap));
		}
	}

	if (meta_line) {
		/* Submit metadata write for previous data line */
		err = pblk_submit_meta_io(pblk, meta_line);
		if (err) {
			pblk_err(pblk, "metadata I/O submission failed: %d\n",
					err);
			return NVM_IO_ERR;
		}
	}

	return NVM_IO_OK;
}

static void pblk_free_write_rqd(struct pblk *pblk, struct nvm_rq *rqd)
{
	struct pblk_c_ctx *c_ctx = nvm_rq_to_pdu(rqd);
	struct bio *bio = rqd->bio;

	if (c_ctx->nr_padded)
		pblk_bio_free_pages(pblk, bio, c_ctx->nr_valid,
							c_ctx->nr_padded);
}

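/*
 * Take one write unit out of the ring buffer and submit it to the media.
 * Failed writes sitting on the resubmit list are serviced first; otherwise
 * new entries are committed from the write buffer, provided enough sectors
 * are buffered for a full write or a flush is pending. Returns 0 if a
 * request was submitted, 1 if there was nothing to do.
 */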
static int pblk_submit_write(struct pblk *pblk)
{
	struct bio *bio;
	struct nvm_rq *rqd;
	unsigned int secs_avail, secs_to_sync, secs_to_com;
	unsigned int secs_to_flush;
	unsigned long pos;
	unsigned int resubmit;

	spin_lock(&pblk->resubmit_lock);
	resubmit = !list_empty(&pblk->resubmit_list);
	spin_unlock(&pblk->resubmit_lock);

	/* Resubmit failed writes first */
	if (resubmit) {
		struct pblk_c_ctx *r_ctx;

		spin_lock(&pblk->resubmit_lock);
		r_ctx = list_first_entry(&pblk->resubmit_list,
					struct pblk_c_ctx, list);
		list_del(&r_ctx->list);
		spin_unlock(&pblk->resubmit_lock);

		secs_avail = r_ctx->nr_valid;
		pos = r_ctx->sentry;

		pblk_prepare_resubmit(pblk, pos, secs_avail);
		secs_to_sync = pblk_calc_secs_to_sync(pblk, secs_avail,
				secs_avail);

		kfree(r_ctx);
	} else {
		/* If there are no sectors in the cache,
		 * flushes (bios without data) will be cleared on
		 * the cache threads
		 */
		secs_avail = pblk_rb_read_count(&pblk->rwb);
		if (!secs_avail)
			return 1;

		secs_to_flush = pblk_rb_flush_point_count(&pblk->rwb);
		if (!secs_to_flush && secs_avail < pblk->min_write_pgs)
			return 1;

		secs_to_sync = pblk_calc_secs_to_sync(pblk, secs_avail,
					secs_to_flush);
		if (secs_to_sync > pblk->max_write_pgs) {
			pblk_err(pblk, "bad buffer sync calculation\n");
			return 1;
		}

		secs_to_com = (secs_to_sync > secs_avail) ?
			secs_avail : secs_to_sync;
		pos = pblk_rb_read_commit(&pblk->rwb, secs_to_com);
	}

	bio = bio_alloc(GFP_KERNEL, secs_to_sync);

	bio->bi_iter.bi_sector = 0; /* internal bio */
	bio_set_op_attrs(bio, REQ_OP_WRITE, 0);

	rqd = pblk_alloc_rqd(pblk, PBLK_WRITE);
	rqd->bio = bio;

	if (pblk_rb_read_to_bio(&pblk->rwb, rqd, pos, secs_to_sync,
								secs_avail)) {
		pblk_err(pblk, "corrupted write bio\n");
		goto fail_put_bio;
	}

	if (pblk_submit_io_set(pblk, rqd))
		goto fail_free_bio;

#ifdef CONFIG_NVM_PBLK_DEBUG
	atomic_long_add(secs_to_sync, &pblk->sub_writes);
#endif

	return 0;

fail_free_bio:
	pblk_free_write_rqd(pblk, rqd);
fail_put_bio:
	bio_put(bio);
	pblk_free_rqd(pblk, rqd, PBLK_WRITE);

	return 1;
}

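/*
 * Write thread main loop: submit buffered writes for as long as there is
 * work to do, then sleep until woken up again.
 */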
int pblk_write_ts(void *data)
{
	struct pblk *pblk = data;

	while (!kthread_should_stop()) {
		if (!pblk_submit_write(pblk))
			continue;
		set_current_state(TASK_INTERRUPTIBLE);
		io_schedule();
	}

	return 0;
}