lightnvm: pblk: fix releases of kmem cache in error path
[linux-2.6-block.git] drivers/lightnvm/pblk-gc.c
/*
 * Copyright (C) 2016 CNEX Labs
 * Initial release: Javier Gonzalez <javier@cnexlabs.com>
 *                  Matias Bjorling <matias@cnexlabs.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * pblk-gc.c - pblk's garbage collector
 */

#include "pblk.h"
#include <linux/delay.h>

static void pblk_gc_free_gc_rq(struct pblk_gc_rq *gc_rq)
{
        if (gc_rq->data)
                vfree(gc_rq->data);

        kfree(gc_rq);
}

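/*
 * Drain the GC write list: write each request's data to the write cache,
 * drop the line reference taken when the request was queued and free it.
 * Returns 1 if the list was empty, 0 otherwise.
 */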
static int pblk_gc_write(struct pblk *pblk)
{
        struct pblk_gc *gc = &pblk->gc;
        struct pblk_gc_rq *gc_rq, *tgc_rq;
        LIST_HEAD(w_list);

        spin_lock(&gc->w_lock);
        if (list_empty(&gc->w_list)) {
                spin_unlock(&gc->w_lock);
                return 1;
        }

        list_cut_position(&w_list, &gc->w_list, gc->w_list.prev);
        gc->w_entries = 0;
        spin_unlock(&gc->w_lock);

        list_for_each_entry_safe(gc_rq, tgc_rq, &w_list, list) {
                pblk_write_gc_to_cache(pblk, gc_rq);
                list_del(&gc_rq->list);
                kref_put(&gc_rq->line->ref, pblk_line_put);
                pblk_gc_free_gc_rq(gc_rq);
        }

        return 0;
}

static void pblk_gc_writer_kick(struct pblk_gc *gc)
{
        wake_up_process(gc->gc_writer_ts);
}

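/*
 * Return a line that could not be garbage collected to the GC group list
 * it belongs to, so that it can be picked as a victim again later.
 */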
static void pblk_put_line_back(struct pblk *pblk, struct pblk_line *line)
{
        struct pblk_line_mgmt *l_mg = &pblk->l_mg;
        struct list_head *move_list;

        spin_lock(&line->lock);
        WARN_ON(line->state != PBLK_LINESTATE_GC);
        line->state = PBLK_LINESTATE_CLOSED;
        move_list = pblk_line_gc_list(pblk, line);
        spin_unlock(&line->lock);

        if (move_list) {
                spin_lock(&l_mg->gc_lock);
                list_add_tail(&line->list, move_list);
                spin_unlock(&l_mg->gc_lock);
        }
}

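/*
 * Worker that moves the valid sectors of one GC request: allocate a data
 * buffer, read the victim sectors into it and queue the request on the GC
 * write list, throttled to at most PBLK_GC_RQ_QD outstanding entries. On
 * error, the request is freed and the line reference is dropped.
 */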
static void pblk_gc_line_ws(struct work_struct *work)
{
        struct pblk_line_ws *gc_rq_ws = container_of(work,
                                                struct pblk_line_ws, ws);
        struct pblk *pblk = gc_rq_ws->pblk;
        struct nvm_tgt_dev *dev = pblk->dev;
        struct nvm_geo *geo = &dev->geo;
        struct pblk_gc *gc = &pblk->gc;
        struct pblk_line *line = gc_rq_ws->line;
        struct pblk_gc_rq *gc_rq = gc_rq_ws->priv;
        int ret;

        up(&gc->gc_sem);

        gc_rq->data = vmalloc(gc_rq->nr_secs * geo->sec_size);
        if (!gc_rq->data) {
                pr_err("pblk: could not GC line:%d (%d/%d)\n",
                                line->id, *line->vsc, gc_rq->nr_secs);
                goto out;
        }

        /* Read from GC victim block */
        ret = pblk_submit_read_gc(pblk, gc_rq);
        if (ret) {
                pr_err("pblk: failed GC read in line:%d (err:%d)\n",
                                line->id, ret);
                goto out;
        }

        if (!gc_rq->secs_to_gc)
                goto out;

retry:
        spin_lock(&gc->w_lock);
        if (gc->w_entries >= PBLK_GC_RQ_QD) {
                spin_unlock(&gc->w_lock);
                pblk_gc_writer_kick(&pblk->gc);
                usleep_range(128, 256);
                goto retry;
        }
        gc->w_entries++;
        list_add_tail(&gc_rq->list, &gc->w_list);
        spin_unlock(&gc->w_lock);

        pblk_gc_writer_kick(&pblk->gc);

        kfree(gc_rq_ws);
        return;

out:
        pblk_gc_free_gc_rq(gc_rq);
        kref_put(&line->ref, pblk_line_put);
        kfree(gc_rq_ws);
}

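/*
 * Worker that prepares a line for GC: read the line's emeta to recover the
 * LBA list, then split the valid sectors into gc_rq chunks of at most
 * max_write_pgs sectors. Each chunk takes the GC semaphore and a line
 * reference before being queued on the line reader workqueue.
 */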
static void pblk_gc_line_prepare_ws(struct work_struct *work)
{
        struct pblk_line_ws *line_ws = container_of(work, struct pblk_line_ws,
                                                                        ws);
        struct pblk *pblk = line_ws->pblk;
        struct pblk_line *line = line_ws->line;
        struct pblk_line_mgmt *l_mg = &pblk->l_mg;
        struct pblk_line_meta *lm = &pblk->lm;
        struct pblk_gc *gc = &pblk->gc;
        struct line_emeta *emeta_buf;
        struct pblk_line_ws *gc_rq_ws;
        struct pblk_gc_rq *gc_rq;
        __le64 *lba_list;
        unsigned long *invalid_bitmap;
        int sec_left, nr_secs, bit;
        int ret;

        invalid_bitmap = kmalloc(lm->sec_bitmap_len, GFP_KERNEL);
        if (!invalid_bitmap) {
                pr_err("pblk: could not allocate GC invalid bitmap\n");
                goto fail_free_ws;
        }

        emeta_buf = pblk_malloc(lm->emeta_len[0], l_mg->emeta_alloc_type,
                                                                GFP_KERNEL);
        if (!emeta_buf) {
                pr_err("pblk: cannot use GC emeta\n");
                goto fail_free_bitmap;
        }

        ret = pblk_line_read_emeta(pblk, line, emeta_buf);
        if (ret) {
                pr_err("pblk: line %d read emeta failed (%d)\n", line->id, ret);
                goto fail_free_emeta;
        }

        /* If this read fails, it means that emeta is corrupted. For now, leave
         * the line untouched. TODO: Implement a recovery routine that scans and
         * moves all sectors on the line.
         */
        lba_list = pblk_recov_get_lba_list(pblk, emeta_buf);
        if (!lba_list) {
                pr_err("pblk: could not interpret emeta (line %d)\n", line->id);
                goto fail_free_emeta;
        }

        spin_lock(&line->lock);
        bitmap_copy(invalid_bitmap, line->invalid_bitmap, lm->sec_per_line);
        sec_left = pblk_line_vsc(line);
        spin_unlock(&line->lock);

        if (sec_left < 0) {
                pr_err("pblk: corrupted GC line (%d)\n", line->id);
                goto fail_free_emeta;
        }

        bit = -1;
next_rq:
        gc_rq = kmalloc(sizeof(struct pblk_gc_rq), GFP_KERNEL);
        if (!gc_rq)
                goto fail_free_emeta;

        nr_secs = 0;
        do {
                bit = find_next_zero_bit(invalid_bitmap, lm->sec_per_line,
                                                                bit + 1);
                if (bit > line->emeta_ssec)
                        break;

                gc_rq->paddr_list[nr_secs] = bit;
                gc_rq->lba_list[nr_secs++] = le64_to_cpu(lba_list[bit]);
        } while (nr_secs < pblk->max_write_pgs);

        if (unlikely(!nr_secs)) {
                kfree(gc_rq);
                goto out;
        }

        gc_rq->nr_secs = nr_secs;
        gc_rq->line = line;

        gc_rq_ws = kmalloc(sizeof(struct pblk_line_ws), GFP_KERNEL);
        if (!gc_rq_ws)
                goto fail_free_gc_rq;

        gc_rq_ws->pblk = pblk;
        gc_rq_ws->line = line;
        gc_rq_ws->priv = gc_rq;

        down(&gc->gc_sem);
        kref_get(&line->ref);

        INIT_WORK(&gc_rq_ws->ws, pblk_gc_line_ws);
        queue_work(gc->gc_line_reader_wq, &gc_rq_ws->ws);

        sec_left -= nr_secs;
        if (sec_left > 0)
                goto next_rq;

out:
        pblk_mfree(emeta_buf, l_mg->emeta_alloc_type);
        kfree(line_ws);
        kfree(invalid_bitmap);

        kref_put(&line->ref, pblk_line_put);
        atomic_dec(&gc->inflight_gc);

        return;

fail_free_gc_rq:
        kfree(gc_rq);
fail_free_emeta:
        pblk_mfree(emeta_buf, l_mg->emeta_alloc_type);
fail_free_bitmap:
        kfree(invalid_bitmap);
fail_free_ws:
        kfree(line_ws);

        pblk_put_line_back(pblk, line);
        kref_put(&line->ref, pblk_line_put);
        atomic_dec(&gc->inflight_gc);

        pr_err("pblk: Failed to GC line %d\n", line->id);
}

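/*
 * Hand a victim line over to the GC reader workqueue for processing.
 */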
static int pblk_gc_line(struct pblk *pblk, struct pblk_line *line)
{
        struct pblk_gc *gc = &pblk->gc;
        struct pblk_line_ws *line_ws;

        pr_debug("pblk: line '%d' being reclaimed for GC\n", line->id);

        line_ws = kmalloc(sizeof(struct pblk_line_ws), GFP_KERNEL);
        if (!line_ws)
                return -ENOMEM;

        line_ws->pblk = pblk;
        line_ws->line = line;

        INIT_WORK(&line_ws->ws, pblk_gc_line_prepare_ws);
        queue_work(gc->gc_reader_wq, &line_ws->ws);

        return 0;
}

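/*
 * Take the first line off the GC read list and start collecting it.
 * Returns 1 if the list was empty, 0 otherwise.
 */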
static int pblk_gc_read(struct pblk *pblk)
{
        struct pblk_gc *gc = &pblk->gc;
        struct pblk_line *line;

        spin_lock(&gc->r_lock);
        if (list_empty(&gc->r_list)) {
                spin_unlock(&gc->r_lock);
                return 1;
        }

        line = list_first_entry(&gc->r_list, struct pblk_line, list);
        list_del(&line->list);
        spin_unlock(&gc->r_lock);

        pblk_gc_kick(pblk);

        if (pblk_gc_line(pblk, line))
                pr_err("pblk: failed to GC line %d\n", line->id);

        return 0;
}

static void pblk_gc_reader_kick(struct pblk_gc *gc)
{
        wake_up_process(gc->gc_reader_ts);
}

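/*
 * Greedy victim selection: pick the line in the group with the fewest
 * valid sectors, i.e. the one that is cheapest to move.
 */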
static struct pblk_line *pblk_gc_get_victim_line(struct pblk *pblk,
                                                 struct list_head *group_list)
{
        struct pblk_line *line, *victim;
        int line_vsc, victim_vsc;

        victim = list_first_entry(group_list, struct pblk_line, list);
        list_for_each_entry(line, group_list, list) {
                line_vsc = le32_to_cpu(*line->vsc);
                victim_vsc = le32_to_cpu(*victim->vsc);
                if (line_vsc < victim_vsc)
                        victim = line;
        }

        return victim;
}

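/*
 * GC should run only while it is active and the number of free blocks is
 * below the rate-limiter high threshold.
 */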
static bool pblk_gc_should_run(struct pblk_gc *gc, struct pblk_rl *rl)
{
        unsigned int nr_blocks_free, nr_blocks_need;

        nr_blocks_need = pblk_rl_high_thrs(rl);
        nr_blocks_free = pblk_rl_nr_free_blks(rl);

        /* This is not critical, no need to take lock here */
        return ((gc->gc_active) && (nr_blocks_need > nr_blocks_free));
}

/*
 * Lines with no valid sectors will be returned to the free list immediately. If
 * GC is activated - either because the free block count is under the determined
 * threshold, or because it is being forced from user space - only lines with a
 * high count of invalid sectors will be recycled.
 */
static void pblk_gc_run(struct pblk *pblk)
{
        struct pblk_line_mgmt *l_mg = &pblk->l_mg;
        struct pblk_gc *gc = &pblk->gc;
        struct pblk_line *line;
        struct list_head *group_list;
        bool run_gc;
        int inflight_gc, gc_group = 0, prev_group = 0;

        do {
                spin_lock(&l_mg->gc_lock);
                if (list_empty(&l_mg->gc_full_list)) {
                        spin_unlock(&l_mg->gc_lock);
                        break;
                }

                line = list_first_entry(&l_mg->gc_full_list,
                                                struct pblk_line, list);

                spin_lock(&line->lock);
                WARN_ON(line->state != PBLK_LINESTATE_CLOSED);
                line->state = PBLK_LINESTATE_GC;
                spin_unlock(&line->lock);

                list_del(&line->list);
                spin_unlock(&l_mg->gc_lock);

                kref_put(&line->ref, pblk_line_put);
        } while (1);

        run_gc = pblk_gc_should_run(&pblk->gc, &pblk->rl);
        if (!run_gc || (atomic_read(&gc->inflight_gc) >= PBLK_GC_L_QD))
                return;

next_gc_group:
        group_list = l_mg->gc_lists[gc_group++];

        do {
                spin_lock(&l_mg->gc_lock);
                if (list_empty(group_list)) {
                        spin_unlock(&l_mg->gc_lock);
                        break;
                }

                line = pblk_gc_get_victim_line(pblk, group_list);

                spin_lock(&line->lock);
                WARN_ON(line->state != PBLK_LINESTATE_CLOSED);
                line->state = PBLK_LINESTATE_GC;
                spin_unlock(&line->lock);

                list_del(&line->list);
                spin_unlock(&l_mg->gc_lock);

                spin_lock(&gc->r_lock);
                list_add_tail(&line->list, &gc->r_list);
                spin_unlock(&gc->r_lock);

                inflight_gc = atomic_inc_return(&gc->inflight_gc);
                pblk_gc_reader_kick(gc);

                prev_group = 1;

                /* No need to queue up more GC lines than we can handle */
                run_gc = pblk_gc_should_run(&pblk->gc, &pblk->rl);
                if (!run_gc || inflight_gc >= PBLK_GC_L_QD)
                        break;
        } while (1);

        if (!prev_group && pblk->rl.rb_state > gc_group &&
                                                gc_group < PBLK_GC_NR_LISTS)
                goto next_gc_group;
}

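/*
 * Wake up the GC main, writer and reader threads and re-arm the GC timer.
 */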
void pblk_gc_kick(struct pblk *pblk)
{
        struct pblk_gc *gc = &pblk->gc;

        wake_up_process(gc->gc_ts);
        pblk_gc_writer_kick(gc);
        pblk_gc_reader_kick(gc);
        mod_timer(&gc->gc_timer, jiffies + msecs_to_jiffies(GC_TIME_MSECS));
}

static void pblk_gc_timer(unsigned long data)
{
        struct pblk *pblk = (struct pblk *)data;

        pblk_gc_kick(pblk);
}

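/* Main, writer and reader GC kthreads: each one loops until it is stopped,
 * sleeping whenever there is no work left to do.
 */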
static int pblk_gc_ts(void *data)
{
        struct pblk *pblk = data;

        while (!kthread_should_stop()) {
                pblk_gc_run(pblk);
                set_current_state(TASK_INTERRUPTIBLE);
                io_schedule();
        }

        return 0;
}

static int pblk_gc_writer_ts(void *data)
{
        struct pblk *pblk = data;

        while (!kthread_should_stop()) {
                if (!pblk_gc_write(pblk))
                        continue;
                set_current_state(TASK_INTERRUPTIBLE);
                io_schedule();
        }

        return 0;
}

static int pblk_gc_reader_ts(void *data)
{
        struct pblk *pblk = data;

        while (!kthread_should_stop()) {
                if (!pblk_gc_read(pblk))
                        continue;
                set_current_state(TASK_INTERRUPTIBLE);
                io_schedule();
        }

        return 0;
}

static void pblk_gc_start(struct pblk *pblk)
{
        pblk->gc.gc_active = 1;
        pr_debug("pblk: gc start\n");
}

void pblk_gc_should_start(struct pblk *pblk)
{
        struct pblk_gc *gc = &pblk->gc;

        if (gc->gc_enabled && !gc->gc_active)
                pblk_gc_start(pblk);

        pblk_gc_kick(pblk);
}

/*
 * If flush_wq == 1 then no lock should be held by the caller since
 * flush_workqueue can sleep
 */
static void pblk_gc_stop(struct pblk *pblk, int flush_wq)
{
        pblk->gc.gc_active = 0;
        pr_debug("pblk: gc stop\n");
}

void pblk_gc_should_stop(struct pblk *pblk)
{
        struct pblk_gc *gc = &pblk->gc;

        if (gc->gc_active && !gc->gc_forced)
                pblk_gc_stop(pblk, 0);
}

void pblk_gc_sysfs_state_show(struct pblk *pblk, int *gc_enabled,
                              int *gc_active)
{
        struct pblk_gc *gc = &pblk->gc;

        spin_lock(&gc->lock);
        *gc_enabled = gc->gc_enabled;
        *gc_active = gc->gc_active;
        spin_unlock(&gc->lock);
}

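/*
 * Force GC on or off from sysfs. Forcing GC also enables it; clearing the
 * force flag disables it again.
 */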
int pblk_gc_sysfs_force(struct pblk *pblk, int force)
{
        struct pblk_gc *gc = &pblk->gc;

        if (force < 0 || force > 1)
                return -EINVAL;

        spin_lock(&gc->lock);
        gc->gc_forced = force;

        if (force)
                gc->gc_enabled = 1;
        else
                gc->gc_enabled = 0;
        spin_unlock(&gc->lock);

        pblk_gc_should_start(pblk);

        return 0;
}

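/*
 * Set up the GC kthreads, timer and workqueues. On failure, everything
 * allocated so far is torn down in reverse order.
 */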
int pblk_gc_init(struct pblk *pblk)
{
        struct pblk_gc *gc = &pblk->gc;
        int ret;

        gc->gc_ts = kthread_create(pblk_gc_ts, pblk, "pblk-gc-ts");
        if (IS_ERR(gc->gc_ts)) {
                pr_err("pblk: could not allocate GC main kthread\n");
                return PTR_ERR(gc->gc_ts);
        }

        gc->gc_writer_ts = kthread_create(pblk_gc_writer_ts, pblk,
                                                        "pblk-gc-writer-ts");
        if (IS_ERR(gc->gc_writer_ts)) {
                pr_err("pblk: could not allocate GC writer kthread\n");
                ret = PTR_ERR(gc->gc_writer_ts);
                goto fail_free_main_kthread;
        }

        gc->gc_reader_ts = kthread_create(pblk_gc_reader_ts, pblk,
                                                        "pblk-gc-reader-ts");
        if (IS_ERR(gc->gc_reader_ts)) {
                pr_err("pblk: could not allocate GC reader kthread\n");
                ret = PTR_ERR(gc->gc_reader_ts);
                goto fail_free_writer_kthread;
        }

        setup_timer(&gc->gc_timer, pblk_gc_timer, (unsigned long)pblk);
        mod_timer(&gc->gc_timer, jiffies + msecs_to_jiffies(GC_TIME_MSECS));

        gc->gc_active = 0;
        gc->gc_forced = 0;
        gc->gc_enabled = 1;
        gc->w_entries = 0;
        atomic_set(&gc->inflight_gc, 0);

        /* Workqueue that reads valid sectors from a line and submits them to
         * the GC writer to be recycled.
         */
        gc->gc_line_reader_wq = alloc_workqueue("pblk-gc-line-reader-wq",
                        WQ_MEM_RECLAIM | WQ_UNBOUND, PBLK_GC_MAX_READERS);
        if (!gc->gc_line_reader_wq) {
                pr_err("pblk: could not allocate GC line reader workqueue\n");
                ret = -ENOMEM;
                goto fail_free_reader_kthread;
        }

        /* Workqueue that prepares lines for GC */
        gc->gc_reader_wq = alloc_workqueue("pblk-gc-line_wq",
                                        WQ_MEM_RECLAIM | WQ_UNBOUND, 1);
        if (!gc->gc_reader_wq) {
                pr_err("pblk: could not allocate GC reader workqueue\n");
                ret = -ENOMEM;
                goto fail_free_reader_line_wq;
        }

        spin_lock_init(&gc->lock);
        spin_lock_init(&gc->w_lock);
        spin_lock_init(&gc->r_lock);

        sema_init(&gc->gc_sem, PBLK_GC_RQ_QD);

        INIT_LIST_HEAD(&gc->w_list);
        INIT_LIST_HEAD(&gc->r_list);

        return 0;

fail_free_reader_line_wq:
        destroy_workqueue(gc->gc_line_reader_wq);
fail_free_reader_kthread:
        kthread_stop(gc->gc_reader_ts);
fail_free_writer_kthread:
        kthread_stop(gc->gc_writer_ts);
fail_free_main_kthread:
        kthread_stop(gc->gc_ts);

        return ret;
}

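/*
 * Tear down the GC machinery: flush outstanding work, stop the timer and
 * kthreads, and destroy the workqueues.
 */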
void pblk_gc_exit(struct pblk *pblk)
{
        struct pblk_gc *gc = &pblk->gc;

        flush_workqueue(gc->gc_reader_wq);
        flush_workqueue(gc->gc_line_reader_wq);

        del_timer(&gc->gc_timer);
        pblk_gc_stop(pblk, 1);

        if (gc->gc_ts)
                kthread_stop(gc->gc_ts);

        if (gc->gc_reader_wq)
                destroy_workqueue(gc->gc_reader_wq);

        if (gc->gc_line_reader_wq)
                destroy_workqueue(gc->gc_line_reader_wq);

        if (gc->gc_writer_ts)
                kthread_stop(gc->gc_writer_ts);

        if (gc->gc_reader_ts)
                kthread_stop(gc->gc_reader_ts);
}