/*
 * background writeback - scan btree for dirty data and write it to the backing
 * device
 *
 * Copyright 2010, 2011 Kent Overstreet <kent.overstreet@gmail.com>
 * Copyright 2012 Google, Inc.
 */

#include "bcache.h"
#include "btree.h"
#include "debug.h"
#include "writeback.h"

#include <linux/delay.h>
#include <linux/kthread.h>
#include <trace/events/bcache.h>

/* Rate limiting */

static void __update_writeback_rate(struct cached_dev *dc)
{
	struct cache_set *c = dc->disk.c;
	uint64_t cache_sectors = c->nbuckets * c->sb.bucket_size;
	uint64_t cache_dirty_target =
		div_u64(cache_sectors * dc->writeback_percent, 100);

	int64_t target = div64_u64(cache_dirty_target * bdev_sectors(dc->bdev),
				   c->cached_dev_sectors);

	/* PD controller */

	int64_t dirty = bcache_dev_sectors_dirty(&dc->disk);
	int64_t derivative = dirty - dc->disk.sectors_dirty_last;
	int64_t proportional = dirty - target;
	int64_t change;

	dc->disk.sectors_dirty_last = dirty;

	/* Scale to sectors per second */

	proportional *= dc->writeback_rate_update_seconds;
	proportional = div_s64(proportional, dc->writeback_rate_p_term_inverse);

	derivative = div_s64(derivative, dc->writeback_rate_update_seconds);

	derivative = ewma_add(dc->disk.sectors_dirty_derivative, derivative,
			      (dc->writeback_rate_d_term /
			       dc->writeback_rate_update_seconds) ?: 1, 0);

	derivative *= dc->writeback_rate_d_term;
	derivative = div_s64(derivative, dc->writeback_rate_p_term_inverse);

	change = proportional + derivative;

	/* Don't increase writeback rate if the device isn't keeping up */
	if (change > 0 &&
	    time_after64(local_clock(),
			 dc->writeback_rate.next + NSEC_PER_MSEC))
		change = 0;

	dc->writeback_rate.rate =
		clamp_t(int64_t, (int64_t) dc->writeback_rate.rate + change,
			1, NSEC_PER_MSEC);

	dc->writeback_rate_proportional = proportional;
	dc->writeback_rate_derivative = derivative;
	dc->writeback_rate_change = change;
	dc->writeback_rate_target = target;
}
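
/*
 * Worked example using the defaults set in bch_cached_dev_writeback_init()
 * below (update_seconds = 5, p_term_inverse = 6000) and made-up dirty
 * counts, purely illustrative:
 *
 *   target = 100,000 sectors, dirty = 160,000 sectors
 *   proportional = 60,000 * 5 / 6000 = +50
 *   derivative   = EWMA-smoothed change in dirty since the last update,
 *                  scaled by d_term / p_term_inverse
 *   change       = proportional + derivative
 *
 * So a device sitting 60k sectors over target has ~50 added to
 * writeback_rate.rate on each 5-second update until dirty converges on
 * target (the rate itself staying clamped to [1, NSEC_PER_MSEC]).
 */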

static void update_writeback_rate(struct work_struct *work)
{
	struct cached_dev *dc = container_of(to_delayed_work(work),
					     struct cached_dev,
					     writeback_rate_update);

	down_read(&dc->writeback_lock);

	if (atomic_read(&dc->has_dirty) &&
	    dc->writeback_percent)
		__update_writeback_rate(dc);

	up_read(&dc->writeback_lock);

	schedule_delayed_work(&dc->writeback_rate_update,
			      dc->writeback_rate_update_seconds * HZ);
}

static unsigned writeback_delay(struct cached_dev *dc, unsigned sectors)
{
	if (test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags) ||
	    !dc->writeback_percent)
		return 0;

	return bch_next_delay(&dc->writeback_rate, sectors);
}
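
/*
 * Back-of-the-envelope check, assuming .rate is in sectors per second (per
 * the "Scale to sectors per second" comment above): at the initial rate of
 * 1024 set below, writing back a 1024-sector extent should come back from
 * bch_next_delay() as roughly one second of delay.  Returning 0 while
 * detaching, or with writeback_percent == 0, lets writeback run
 * unthrottled.
 */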

struct dirty_io {
	struct closure		cl;
	struct cached_dev	*dc;
	struct bio		bio;
};

static void dirty_init(struct keybuf_key *w)
{
	struct dirty_io *io = w->private;
	struct bio *bio = &io->bio;

	bio_init(bio);
	if (!io->dc->writeback_percent)
		bio_set_prio(bio, IOPRIO_PRIO_VALUE(IOPRIO_CLASS_IDLE, 0));

	bio->bi_iter.bi_size = KEY_SIZE(&w->key) << 9;
	bio->bi_max_vecs = DIV_ROUND_UP(KEY_SIZE(&w->key), PAGE_SECTORS);
	bio->bi_private = w;
	bio->bi_io_vec = bio->bi_inline_vecs;
	bch_bio_map(bio, NULL);
}
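
/*
 * Note: KEY_SIZE() is in 512-byte sectors, hence the << 9 to get a byte
 * count, and the bio uses the inline bvecs allocated immediately after the
 * bio inside struct dirty_io (the kzalloc in read_dirty() below sizes that
 * array per-key).
 */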

static void dirty_io_destructor(struct closure *cl)
{
	struct dirty_io *io = container_of(cl, struct dirty_io, cl);
	kfree(io);
}

static void write_dirty_finish(struct closure *cl)
{
	struct dirty_io *io = container_of(cl, struct dirty_io, cl);
	struct keybuf_key *w = io->bio.bi_private;
	struct cached_dev *dc = io->dc;
	struct bio_vec *bv;
	int i;

	bio_for_each_segment_all(bv, &io->bio, i)
		__free_page(bv->bv_page);

	/* This is kind of a dumb way of signalling errors. */
	if (KEY_DIRTY(&w->key)) {
		int ret;
		unsigned i;
		struct keylist keys;

		bch_keylist_init(&keys);

		bkey_copy(keys.top, &w->key);
		SET_KEY_DIRTY(keys.top, false);
		bch_keylist_push(&keys);

		for (i = 0; i < KEY_PTRS(&w->key); i++)
			atomic_inc(&PTR_BUCKET(dc->disk.c, &w->key, i)->pin);

		ret = bch_btree_insert(dc->disk.c, &keys, NULL, &w->key);

		if (ret)
			trace_bcache_writeback_collision(&w->key);

		atomic_long_inc(ret
				? &dc->disk.c->writeback_keys_failed
				: &dc->disk.c->writeback_keys_done);
	}

	bch_keybuf_del(&dc->writeback_keys, w);
	up(&dc->in_flight);

	closure_return_with_destructor(cl, dirty_io_destructor);
}
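
/*
 * The insert above passes &w->key as the replace key, so clearing the dirty
 * bit is effectively a compare-and-swap: if a foreground write changed the
 * extent while we were writing it back, the insert fails and the extent
 * stays dirty in the btree (counted as a writeback collision).  The bucket
 * pins, as I read it, keep the cache buckets from being invalidated out
 * from under us until the insert has settled.
 */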

static void dirty_endio(struct bio *bio)
{
	struct keybuf_key *w = bio->bi_private;
	struct dirty_io *io = w->private;

	if (bio->bi_error)
		SET_KEY_DIRTY(&w->key, false);

	closure_put(&io->cl);
}

static void write_dirty(struct closure *cl)
{
	struct dirty_io *io = container_of(cl, struct dirty_io, cl);
	struct keybuf_key *w = io->bio.bi_private;

	dirty_init(w);
	bio_set_op_attrs(&io->bio, REQ_OP_WRITE, 0);
	io->bio.bi_iter.bi_sector = KEY_START(&w->key);
	io->bio.bi_bdev = io->dc->bdev;
	io->bio.bi_end_io = dirty_endio;

	closure_bio_submit(&io->bio, cl);

	continue_at(cl, write_dirty_finish, system_wq);
}

static void read_dirty_endio(struct bio *bio)
{
	struct keybuf_key *w = bio->bi_private;
	struct dirty_io *io = w->private;

	bch_count_io_errors(PTR_CACHE(io->dc->disk.c, &w->key, 0),
			    bio->bi_error, "reading dirty data from cache");

	dirty_endio(bio);
}

static void read_dirty_submit(struct closure *cl)
{
	struct dirty_io *io = container_of(cl, struct dirty_io, cl);

	closure_bio_submit(&io->bio, cl);

	continue_at(cl, write_dirty, system_wq);
}
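
/*
 * Each dirty extent thus moves through a three-stage closure pipeline, with
 * each stage continued on system_wq:
 *
 *   read_dirty_submit:  read the extent from the cache device
 *   write_dirty:        write it to the backing device at KEY_START
 *   write_dirty_finish: clear the dirty bit in the btree, free the pages
 *
 * dc->in_flight (initialized to 64 in bch_cached_dev_writeback_init below)
 * bounds how many of these pipelines run concurrently.
 */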

static void read_dirty(struct cached_dev *dc)
{
	unsigned delay = 0;
	struct keybuf_key *w;
	struct dirty_io *io;
	struct closure cl;

	closure_init_stack(&cl);

	/*
	 * XXX: if we error, background writeback just spins. Should use some
	 * mempools.
	 */

	while (!kthread_should_stop()) {

		w = bch_keybuf_next(&dc->writeback_keys);
		if (!w)
			break;

		BUG_ON(ptr_stale(dc->disk.c, &w->key, 0));

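		/*
		 * Throttle: a key that starts where the last one ended
		 * (KEY_START == dc->last_read) skips the sleep so contiguous
		 * dirty data streams out in order; once the read is
		 * non-contiguous, or more than 50ms of delay has
		 * accumulated, pay the delay computed by writeback_delay()
		 * at the bottom of the loop.
		 */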
		if (KEY_START(&w->key) != dc->last_read ||
		    jiffies_to_msecs(delay) > 50)
			while (!kthread_should_stop() && delay)
				delay = schedule_timeout_interruptible(delay);

		dc->last_read = KEY_OFFSET(&w->key);

		io = kzalloc(sizeof(struct dirty_io) + sizeof(struct bio_vec)
			     * DIV_ROUND_UP(KEY_SIZE(&w->key), PAGE_SECTORS),
			     GFP_KERNEL);
		if (!io)
			goto err;

		w->private = io;
		io->dc = dc;

		dirty_init(w);
		bio_set_op_attrs(&io->bio, REQ_OP_READ, 0);
		io->bio.bi_iter.bi_sector = PTR_OFFSET(&w->key, 0);
		io->bio.bi_bdev = PTR_CACHE(dc->disk.c,
					    &w->key, 0)->bdev;
		io->bio.bi_end_io = read_dirty_endio;

		if (bio_alloc_pages(&io->bio, GFP_KERNEL))
			goto err_free;

		trace_bcache_writeback(&w->key);

		down(&dc->in_flight);
		closure_call(&io->cl, read_dirty_submit, NULL, &cl);

		delay = writeback_delay(dc, KEY_SIZE(&w->key));
	}

	if (0) {
err_free:
		kfree(w->private);
err:
		bch_keybuf_del(&dc->writeback_keys, w);
	}

	/*
	 * Wait for outstanding writeback IOs to finish (and keybuf slots to be
	 * freed) before refilling again
	 */
	closure_sync(&cl);
}

/* Scan for dirty data */

void bcache_dev_sectors_dirty_add(struct cache_set *c, unsigned inode,
				  uint64_t offset, int nr_sectors)
{
	struct bcache_device *d = c->devices[inode];
	unsigned stripe_offset, stripe, sectors_dirty;

	if (!d)
		return;

	stripe = offset_to_stripe(d, offset);
	stripe_offset = offset & (d->stripe_size - 1);

	while (nr_sectors) {
		int s = min_t(unsigned, abs(nr_sectors),
			      d->stripe_size - stripe_offset);

		if (nr_sectors < 0)
			s = -s;

		if (stripe >= d->nr_stripes)
			return;

		sectors_dirty = atomic_add_return(s,
					d->stripe_sectors_dirty + stripe);
		if (sectors_dirty == d->stripe_size)
			set_bit(stripe, d->full_dirty_stripes);
		else
			clear_bit(stripe, d->full_dirty_stripes);

		nr_sectors -= s;
		stripe_offset = 0;
		stripe++;
	}
}
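
/*
 * Example with made-up numbers: stripe_size = 1024 sectors (the mask above
 * assumes a power of two), offset = 1000, nr_sectors = +100.  Stripe 0
 * takes the 24 sectors up to its boundary, stripe 1 the remaining 76.  A
 * negative nr_sectors walks the same stripes subtracting instead, and a
 * stripe is flagged in full_dirty_stripes only while every one of its
 * sectors is dirty.
 */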

static bool dirty_pred(struct keybuf *buf, struct bkey *k)
{
	struct cached_dev *dc = container_of(buf, struct cached_dev,
					     writeback_keys);

	BUG_ON(KEY_INODE(k) != dc->disk.id);

	return KEY_DIRTY(k);
}

static void refill_full_stripes(struct cached_dev *dc)
{
	struct keybuf *buf = &dc->writeback_keys;
	unsigned start_stripe, stripe, next_stripe;
	bool wrapped = false;

	stripe = offset_to_stripe(&dc->disk, KEY_OFFSET(&buf->last_scanned));

	if (stripe >= dc->disk.nr_stripes)
		stripe = 0;

	start_stripe = stripe;

	while (1) {
		stripe = find_next_bit(dc->disk.full_dirty_stripes,
				       dc->disk.nr_stripes, stripe);

		if (stripe == dc->disk.nr_stripes)
			goto next;

		next_stripe = find_next_zero_bit(dc->disk.full_dirty_stripes,
						 dc->disk.nr_stripes, stripe);

		buf->last_scanned = KEY(dc->disk.id,
					stripe * dc->disk.stripe_size, 0);

		bch_refill_keybuf(dc->disk.c, buf,
				  &KEY(dc->disk.id,
				       next_stripe * dc->disk.stripe_size, 0),
				  dirty_pred);

		if (array_freelist_empty(&buf->freelist))
			return;

		stripe = next_stripe;
next:
		if (wrapped && stripe > start_stripe)
			return;

		if (stripe == dc->disk.nr_stripes) {
			stripe = 0;
			wrapped = true;
		}
	}
}
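
/*
 * Only used when dc->partial_stripes_expensive is set; the expected case,
 * as I understand it, is a RAID5/6-style backing device where a
 * partial-stripe write costs a read-modify-write cycle, so writeback
 * prefers to drain runs of completely dirty stripes (found via the
 * full_dirty_stripes bitmap) before touching anything else.
 */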

/*
 * Returns true if we scanned the entire disk
 */
static bool refill_dirty(struct cached_dev *dc)
{
	struct keybuf *buf = &dc->writeback_keys;
	struct bkey start = KEY(dc->disk.id, 0, 0);
	struct bkey end = KEY(dc->disk.id, MAX_KEY_OFFSET, 0);
	struct bkey start_pos;

	/*
	 * make sure keybuf pos is inside the range for this disk - at bringup
	 * we might not be attached yet so this disk's inode nr isn't
	 * initialized then
	 */
	if (bkey_cmp(&buf->last_scanned, &start) < 0 ||
	    bkey_cmp(&buf->last_scanned, &end) > 0)
		buf->last_scanned = start;

	if (dc->partial_stripes_expensive) {
		refill_full_stripes(dc);
		if (array_freelist_empty(&buf->freelist))
			return false;
	}

	start_pos = buf->last_scanned;
	bch_refill_keybuf(dc->disk.c, buf, &end, dirty_pred);

	if (bkey_cmp(&buf->last_scanned, &end) < 0)
		return false;

	/*
	 * If we get to the end start scanning again from the beginning, and
	 * only scan up to where we initially started scanning from:
	 */
	buf->last_scanned = start;
	bch_refill_keybuf(dc->disk.c, buf, &start_pos, dirty_pred);

	return bkey_cmp(&buf->last_scanned, &start_pos) >= 0;
}
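
/*
 * In other words the scan is circular: pass one covers
 * [last_scanned, end], and only if that reaches the end does pass two
 * cover [start, old last_scanned).  Returning true therefore means the
 * whole keyspace for this disk was examined in this refill, which is what
 * lets the writeback thread below conclude the device is clean.
 */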

static int bch_writeback_thread(void *arg)
{
	struct cached_dev *dc = arg;
	bool searched_full_index;

	while (!kthread_should_stop()) {
		down_write(&dc->writeback_lock);
		if (!atomic_read(&dc->has_dirty) ||
		    (!test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags) &&
		     !dc->writeback_running)) {
			up_write(&dc->writeback_lock);
			set_current_state(TASK_INTERRUPTIBLE);

			if (kthread_should_stop())
				return 0;

			schedule();
			continue;
		}

		searched_full_index = refill_dirty(dc);

		if (searched_full_index &&
		    RB_EMPTY_ROOT(&dc->writeback_keys.keys)) {
			atomic_set(&dc->has_dirty, 0);
			cached_dev_put(dc);
			SET_BDEV_STATE(&dc->sb, BDEV_STATE_CLEAN);
			bch_write_bdev_super(dc, NULL);
		}

		up_write(&dc->writeback_lock);

		bch_ratelimit_reset(&dc->writeback_rate);
		read_dirty(dc);

		if (searched_full_index) {
			unsigned delay = dc->writeback_delay * HZ;

			while (delay &&
			       !kthread_should_stop() &&
			       !test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags))
				delay = schedule_timeout_interruptible(delay);
		}
	}

	return 0;
}
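
/*
 * When a full scan turns up nothing dirty, the device is marked clean in
 * its superblock so it can come back safely after a crash; the
 * cached_dev_put() presumably drops the reference taken when dirty data
 * first appeared.  After any full scan the thread also sleeps
 * writeback_delay seconds (30 by default, see below) before rescanning.
 */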

/* Init */

struct sectors_dirty_init {
	struct btree_op	op;
	unsigned	inode;
};

static int sectors_dirty_init_fn(struct btree_op *_op, struct btree *b,
				 struct bkey *k)
{
	struct sectors_dirty_init *op = container_of(_op,
					struct sectors_dirty_init, op);
	if (KEY_INODE(k) > op->inode)
		return MAP_DONE;

	if (KEY_DIRTY(k))
		bcache_dev_sectors_dirty_add(b->c, KEY_INODE(k),
					     KEY_START(k), KEY_SIZE(k));

	return MAP_CONTINUE;
}

void bch_sectors_dirty_init(struct cached_dev *dc)
{
	struct sectors_dirty_init op;

	bch_btree_op_init(&op.op, -1);
	op.inode = dc->disk.id;

	bch_btree_map_keys(&op.op, dc->disk.c, &KEY(op.inode, 0, 0),
			   sectors_dirty_init_fn, 0);

	dc->disk.sectors_dirty_last = bcache_dev_sectors_dirty(&dc->disk);
}

void bch_cached_dev_writeback_init(struct cached_dev *dc)
{
	sema_init(&dc->in_flight, 64);
	init_rwsem(&dc->writeback_lock);
	bch_keybuf_init(&dc->writeback_keys);

	dc->writeback_metadata = true;
	dc->writeback_running = true;
	dc->writeback_percent = 10;
	dc->writeback_delay = 30;
	dc->writeback_rate.rate = 1024;

	dc->writeback_rate_update_seconds = 5;
	dc->writeback_rate_d_term = 30;
	dc->writeback_rate_p_term_inverse = 6000;

	INIT_DELAYED_WORK(&dc->writeback_rate_update, update_writeback_rate);
}

int bch_cached_dev_writeback_start(struct cached_dev *dc)
{
	dc->writeback_thread = kthread_create(bch_writeback_thread, dc,
					      "bcache_writeback");
	if (IS_ERR(dc->writeback_thread))
		return PTR_ERR(dc->writeback_thread);

	schedule_delayed_work(&dc->writeback_rate_update,
			      dc->writeback_rate_update_seconds * HZ);

	bch_writeback_queue(dc);

	return 0;
}
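
/*
 * Defaults set above, for quick reference (these are the writeback
 * tunables exposed through bcache's sysfs tree, though the sysfs plumbing
 * lives elsewhere):
 *
 *   writeback_percent           10    dirty target as % of cache size
 *   writeback_delay             30    seconds to idle after a full scan
 *   writeback_rate.rate         1024  initial rate, sectors/sec
 *   rate_update_seconds         5     PD controller update interval
 *   rate_d_term                 30    derivative smoothing/weight
 *   rate_p_term_inverse         6000  inverse proportional gain
 */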