/*
 * background writeback - scan btree for dirty data and write it to the backing
 * device
 *
 * Copyright 2010, 2011 Kent Overstreet <kent.overstreet@gmail.com>
 * Copyright 2012 Google, Inc.
 */
#include "bcache.h"
#include "btree.h"
#include "debug.h"
#include "writeback.h"
#include <linux/delay.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <trace/events/bcache.h>
/* Rate limiting */

static void __update_writeback_rate(struct cached_dev *dc)
{
	struct cache_set *c = dc->disk.c;
	uint64_t cache_sectors = c->nbuckets * c->sb.bucket_size;
	uint64_t cache_dirty_target =
		div_u64(cache_sectors * dc->writeback_percent, 100);

	int64_t target = div64_u64(cache_dirty_target * bdev_sectors(dc->bdev),
				   c->cached_dev_sectors);

	/* PD controller */

	int change = 0;
	int64_t error;
	int64_t dirty = bcache_dev_sectors_dirty(&dc->disk);
	int64_t derivative = dirty - dc->disk.sectors_dirty_last;

	dc->disk.sectors_dirty_last = dirty;

	derivative *= dc->writeback_rate_d_term;
	derivative = clamp(derivative, -dirty, dirty);

	derivative = ewma_add(dc->disk.sectors_dirty_derivative, derivative,
			      dc->writeback_rate_d_smooth, 0);

	/* Avoid divide by zero */
	if (!target)
		goto out;

	error = div64_s64((dirty + derivative - target) << 8, target);

	change = div_s64((dc->writeback_rate.rate * error) >> 8,
			 dc->writeback_rate_p_term_inverse);

	/* Don't increase writeback rate if the device isn't keeping up */
	if (change > 0 &&
	    time_after64(local_clock(),
			 dc->writeback_rate.next + 10 * NSEC_PER_MSEC))
		change = 0;

	dc->writeback_rate.rate =
		clamp_t(int64_t, dc->writeback_rate.rate + change,
			1, NSEC_PER_MSEC);
out:
	dc->writeback_rate_derivative = derivative;
	dc->writeback_rate_change = change;
	dc->writeback_rate_target = target;
}
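/*
 * A worked example of the controller above, with illustrative numbers that
 * are not taken from this file: suppose target = 1000 sectors, dirty = 1500
 * and derivative = 0. Then error = ((1500 - 1000) << 8) / 1000 = 128, i.e.
 * 0.5 in the 24.8 fixed point used here. With rate = 1024 and
 * p_term_inverse = 64, change = ((1024 * 128) >> 8) / 64 = 8, so the rate
 * ramps up in small steps instead of jumping straight to the error.
 */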
static void update_writeback_rate(struct work_struct *work)
{
	struct cached_dev *dc = container_of(to_delayed_work(work),
					     struct cached_dev,
					     writeback_rate_update);

	down_read(&dc->writeback_lock);

	if (atomic_read(&dc->has_dirty) &&
	    dc->writeback_percent)
		__update_writeback_rate(dc);

	up_read(&dc->writeback_lock);

	schedule_delayed_work(&dc->writeback_rate_update,
			      dc->writeback_rate_update_seconds * HZ);
}
static unsigned writeback_delay(struct cached_dev *dc, unsigned sectors)
{
	uint64_t ret;

	if (atomic_read(&dc->disk.detaching) ||
	    !dc->writeback_percent)
		return 0;

	ret = bch_next_delay(&dc->writeback_rate, sectors * 10000000ULL);

	return min_t(uint64_t, ret, HZ);
}
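/*
 * Note on the constant above (our reading of the ratelimit code in util.c,
 * not something stated in this file): bch_next_delay() does its accounting
 * against local_clock(), i.e. in nanoseconds, so the 10000000ULL factor
 * scales the sector count into the ratelimiter's units before asking how
 * many jiffies to sleep. The result is capped at HZ, so the writeback
 * thread never sleeps more than about a second per batch.
 */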
struct dirty_io {
	struct closure		cl;
	struct cached_dev	*dc;
	struct bio		bio;
};
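/*
 * A dirty_io above carries one writeback IO: the embedded bio is used first
 * to read the dirty data from the cache device and is then reinitialized by
 * dirty_init() for the write to the backing device; its inline bio_vecs are
 * sized for the key when the struct is allocated in read_dirty().
 */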
static void dirty_init(struct keybuf_key *w)
{
	struct dirty_io *io = w->private;
	struct bio *bio = &io->bio;

	bio_init(bio);
	if (!io->dc->writeback_percent)
		bio_set_prio(bio, IOPRIO_PRIO_VALUE(IOPRIO_CLASS_IDLE, 0));

	bio->bi_size = KEY_SIZE(&w->key) << 9;
	bio->bi_max_vecs = DIV_ROUND_UP(KEY_SIZE(&w->key), PAGE_SECTORS);
	bio->bi_private = w;
	bio->bi_io_vec = bio->bi_inline_vecs;
	bch_bio_map(bio, NULL);
}
static void dirty_io_destructor(struct closure *cl)
{
	struct dirty_io *io = container_of(cl, struct dirty_io, cl);
	kfree(io);
}
static void write_dirty_finish(struct closure *cl)
{
	struct dirty_io *io = container_of(cl, struct dirty_io, cl);
	struct keybuf_key *w = io->bio.bi_private;
	struct cached_dev *dc = io->dc;
	struct bio_vec *bv;
	int i;

	bio_for_each_segment_all(bv, &io->bio, i)
		__free_page(bv->bv_page);

	/* This is kind of a dumb way of signalling errors. */
	if (KEY_DIRTY(&w->key)) {
		unsigned i;
		struct btree_op op;
		struct keylist keys;

		bch_btree_op_init_stack(&op);
		bch_keylist_init(&keys);

		op.type = BTREE_REPLACE;
		bkey_copy(&op.replace, &w->key);

		SET_KEY_DIRTY(&w->key, false);
		bch_keylist_add(&keys, &w->key);

		for (i = 0; i < KEY_PTRS(&w->key); i++)
			atomic_inc(&PTR_BUCKET(dc->disk.c, &w->key, i)->pin);

		bch_btree_insert(&op, dc->disk.c, &keys);
		closure_sync(&op.cl);

		if (op.insert_collision)
			trace_bcache_writeback_collision(&w->key);

		atomic_long_inc(op.insert_collision
				? &dc->disk.c->writeback_keys_failed
				: &dc->disk.c->writeback_keys_done);
	}

	bch_keybuf_del(&dc->writeback_keys, w);
	up(&dc->in_flight);

	closure_return_with_destructor(cl, dirty_io_destructor);
}
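/*
 * Why BTREE_REPLACE and the bucket pins above: the insert must only clear
 * the dirty bit if the key still points at the data we just wrote out.
 * BTREE_REPLACE makes the insert compare against op.replace and fail with
 * insert_collision if the key changed in the meantime, and pinning the
 * buckets keeps the cached data from being invalidated by garbage
 * collection while the clean key is in flight.
 */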
static void dirty_endio(struct bio *bio, int error)
{
	struct keybuf_key *w = bio->bi_private;
	struct dirty_io *io = w->private;

	if (error)
		SET_KEY_DIRTY(&w->key, false);

	closure_put(&io->cl);
}
static void write_dirty(struct closure *cl)
{
	struct dirty_io *io = container_of(cl, struct dirty_io, cl);
	struct keybuf_key *w = io->bio.bi_private;

	dirty_init(w);
	io->bio.bi_rw = WRITE;
	io->bio.bi_sector = KEY_START(&w->key);
	io->bio.bi_bdev = io->dc->bdev;
	io->bio.bi_end_io = dirty_endio;

	closure_bio_submit(&io->bio, cl, &io->dc->disk);

	continue_at(cl, write_dirty_finish, system_wq);
}
static void read_dirty_endio(struct bio *bio, int error)
{
	struct keybuf_key *w = bio->bi_private;
	struct dirty_io *io = w->private;

	bch_count_io_errors(PTR_CACHE(io->dc->disk.c, &w->key, 0),
			    error, "reading dirty data from cache");

	dirty_endio(bio, error);
}
static void read_dirty_submit(struct closure *cl)
{
	struct dirty_io *io = container_of(cl, struct dirty_io, cl);

	closure_bio_submit(&io->bio, cl, &io->dc->disk);

	continue_at(cl, write_dirty, system_wq);
}
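/*
 * The IO path for one key, as wired up by the closures above: read_dirty()
 * allocates the dirty_io and submits the read from the cache device
 * (read_dirty_submit), which continues to write_dirty() to write the
 * backing device, which continues to write_dirty_finish() to update the
 * btree and release the keybuf slot.
 */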
static void read_dirty(struct cached_dev *dc)
{
	unsigned delay = 0;
	struct keybuf_key *w;
	struct dirty_io *io;
	struct closure cl;

	closure_init_stack(&cl);

	/*
	 * XXX: if we error, background writeback just spins. Should use some
	 * mempools.
	 */

	while (!kthread_should_stop()) {
		try_to_freeze();

		w = bch_keybuf_next(&dc->writeback_keys);
		if (!w)
			break;

		BUG_ON(ptr_stale(dc->disk.c, &w->key, 0));

		if (KEY_START(&w->key) != dc->last_read ||
		    jiffies_to_msecs(delay) > 50)
			while (!kthread_should_stop() && delay)
				delay = schedule_timeout_interruptible(delay);

		dc->last_read = KEY_OFFSET(&w->key);

		io = kzalloc(sizeof(struct dirty_io) + sizeof(struct bio_vec)
			     * DIV_ROUND_UP(KEY_SIZE(&w->key), PAGE_SECTORS),
			     GFP_KERNEL);
		if (!io)
			goto err;

		w->private = io;
		io->dc = dc;

		dirty_init(w);
		io->bio.bi_sector = PTR_OFFSET(&w->key, 0);
		io->bio.bi_bdev = PTR_CACHE(dc->disk.c,
					    &w->key, 0)->bdev;
		io->bio.bi_rw = READ;
		io->bio.bi_end_io = read_dirty_endio;

		if (bio_alloc_pages(&io->bio, GFP_KERNEL))
			goto err_free;

		trace_bcache_writeback(&w->key);

		down(&dc->in_flight);
		closure_call(&io->cl, read_dirty_submit, NULL, &cl);

		delay = writeback_delay(dc, KEY_SIZE(&w->key));
	}

	if (0) {
err_free:
		kfree(w->private);
err:
		bch_keybuf_del(&dc->writeback_keys, w);
	}

	/*
	 * Wait for outstanding writeback IOs to finish (and keybuf slots to be
	 * freed) before refilling again
	 */
	closure_sync(&cl);
}
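/*
 * Throttling above: dc->in_flight is a semaphore initialized to 64 in
 * bch_cached_dev_writeback_init(), so at most 64 dirty_io structures are in
 * flight at once; writeback_delay() additionally paces the scan according
 * to the PD-controlled rate.
 */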
/* Scan for dirty data */
void bcache_dev_sectors_dirty_add(struct cache_set *c, unsigned inode,
				  uint64_t offset, int nr_sectors)
{
	struct bcache_device *d = c->devices[inode];
	unsigned stripe_offset;
	uint64_t stripe = offset;

	if (!d)
		return;

	do_div(stripe, d->stripe_size);

	stripe_offset = offset & (d->stripe_size - 1);

	while (nr_sectors) {
		int s = min_t(unsigned, abs(nr_sectors),
			      d->stripe_size - stripe_offset);

		if (nr_sectors < 0)
			s = -s;

		atomic_add(s, d->stripe_sectors_dirty + stripe);
		nr_sectors -= s;
		stripe_offset = 0;
		stripe++;
	}
}
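/*
 * Example of the loop above, with illustrative numbers: with a stripe size
 * of 1024 sectors, adding nr_sectors = 1536 at offset = 512 marks 512
 * sectors dirty in stripe 0 (up to the stripe boundary) and 1024 in stripe
 * 1; a negative nr_sectors walks the same stripes subtracting.
 */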
static bool dirty_pred(struct keybuf *buf, struct bkey *k)
{
	return KEY_DIRTY(k);
}
static bool dirty_full_stripe_pred(struct keybuf *buf, struct bkey *k)
{
	uint64_t stripe = KEY_START(k);
	unsigned nr_sectors = KEY_SIZE(k);
	struct cached_dev *dc = container_of(buf, struct cached_dev,
					     writeback_keys);

	if (!KEY_DIRTY(k))
		return false;

	do_div(stripe, dc->disk.stripe_size);

	while (1) {
		if (atomic_read(dc->disk.stripe_sectors_dirty + stripe) ==
		    dc->disk.stripe_size)
			return true;

		if (nr_sectors <= dc->disk.stripe_size)
			return false;

		nr_sectors -= dc->disk.stripe_size;
		stripe++;
	}
}
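/*
 * dirty_full_stripe_pred() only accepts keys that touch a completely dirty
 * stripe. This matters for backing devices where partial stripe writes are
 * expensive (RAID5/6 being the usual example, though that is our gloss: the
 * code only checks the flag), since writing back whole stripes at a time
 * avoids read-modify-write cycles on the backing device.
 */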
static bool refill_dirty(struct cached_dev *dc)
{
	struct keybuf *buf = &dc->writeback_keys;
	bool searched_from_start = false;
	struct bkey end = KEY(dc->disk.id, MAX_KEY_OFFSET, 0);

	if (bkey_cmp(&buf->last_scanned, &end) >= 0) {
		buf->last_scanned = KEY(dc->disk.id, 0, 0);
		searched_from_start = true;
	}

	if (dc->partial_stripes_expensive) {
		uint64_t i;

		for (i = 0; i < dc->disk.nr_stripes; i++)
			if (atomic_read(dc->disk.stripe_sectors_dirty + i) ==
			    dc->disk.stripe_size)
				goto full_stripes;

		goto normal_refill;
full_stripes:
		searched_from_start = false;	/* not searching entire btree */
		bch_refill_keybuf(dc->disk.c, buf, &end,
				  dirty_full_stripe_pred);
	} else {
normal_refill:
		bch_refill_keybuf(dc->disk.c, buf, &end, dirty_pred);
	}

	return bkey_cmp(&buf->last_scanned, &end) >= 0 && searched_from_start;
}
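/*
 * refill_dirty() returns true only when the keybuf scan started at the
 * beginning of this device's keyspace and ran all the way to the end; the
 * writeback thread uses that to know the whole btree has been searched and,
 * if no keys were found, that the device is now clean.
 */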
static int bch_writeback_thread(void *arg)
{
	struct cached_dev *dc = arg;
	bool searched_full_index;

	while (!kthread_should_stop()) {
		down_write(&dc->writeback_lock);
		if (!atomic_read(&dc->has_dirty) ||
		    (!atomic_read(&dc->disk.detaching) &&
		     !dc->writeback_running)) {
			up_write(&dc->writeback_lock);
			set_current_state(TASK_INTERRUPTIBLE);

			if (kthread_should_stop())
				return 0;

			try_to_freeze();
			schedule();
			continue;
		}

		searched_full_index = refill_dirty(dc);

		if (searched_full_index &&
		    RB_EMPTY_ROOT(&dc->writeback_keys.keys)) {
			atomic_set(&dc->has_dirty, 0);
			cached_dev_put(dc);
			SET_BDEV_STATE(&dc->sb, BDEV_STATE_CLEAN);
			bch_write_bdev_super(dc, NULL);
		}

		up_write(&dc->writeback_lock);

		bch_ratelimit_reset(&dc->writeback_rate);
		read_dirty(dc);

		if (searched_full_index) {
			unsigned delay = dc->writeback_delay * HZ;

			while (delay &&
			       !kthread_should_stop() &&
			       !atomic_read(&dc->disk.detaching))
				delay = schedule_timeout_interruptible(delay);
		}
	}

	return 0;
}
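/*
 * The clean transition above: once a full index search finds no dirty keys,
 * has_dirty is cleared and the backing device superblock is rewritten as
 * BDEV_STATE_CLEAN, marking the backing device as safe to run without its
 * cache; cached_dev_put() releases the reference writeback held while the
 * device was dirty.
 */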
/* Init */

static int sectors_dirty_init_fn(struct btree_op *op, struct btree *b,
				 struct bkey *k)
{
	if (KEY_INODE(k) > op->inode)
		return MAP_DONE;

	if (KEY_DIRTY(k))
		bcache_dev_sectors_dirty_add(b->c, KEY_INODE(k),
					     KEY_START(k), KEY_SIZE(k));

	return MAP_CONTINUE;
}
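/*
 * Called at registration time: bch_sectors_dirty_init() below walks every
 * key belonging to this device and repopulates the in-memory
 * stripe_sectors_dirty counters from the dirty bits stored in the btree,
 * since those counters are not persisted across reboots.
 */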
void bch_sectors_dirty_init(struct cached_dev *dc)
{
	struct btree_op op;

	bch_btree_op_init_stack(&op);
	op.inode = dc->disk.id;

	bch_btree_map_keys(&op, dc->disk.c, &KEY(op.inode, 0, 0),
			   sectors_dirty_init_fn, 0);
}
int bch_cached_dev_writeback_init(struct cached_dev *dc)
{
	sema_init(&dc->in_flight, 64);
	init_rwsem(&dc->writeback_lock);
	bch_keybuf_init(&dc->writeback_keys);

	dc->writeback_metadata = true;
	dc->writeback_running = true;
	dc->writeback_percent = 10;
	dc->writeback_delay = 30;
	dc->writeback_rate.rate = 1024;

	dc->writeback_rate_update_seconds = 30;
	dc->writeback_rate_d_term = 16;
	dc->writeback_rate_p_term_inverse = 64;
	dc->writeback_rate_d_smooth = 8;

	dc->writeback_thread = kthread_create(bch_writeback_thread, dc,
					      "bcache_writeback");
	if (IS_ERR(dc->writeback_thread))
		return PTR_ERR(dc->writeback_thread);

	set_task_state(dc->writeback_thread, TASK_INTERRUPTIBLE);

	INIT_DELAYED_WORK(&dc->writeback_rate_update, update_writeback_rate);
	schedule_delayed_work(&dc->writeback_rate_update,
			      dc->writeback_rate_update_seconds * HZ);

	return 0;
}
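/*
 * The defaults above: writeback targets 10% of the cache dirty, re-evaluates
 * the rate every 30 seconds, and allows at most 64 writeback IOs in flight;
 * the PD constants (d_term 16, p_term_inverse 64, d_smooth 8) shape how
 * quickly the rate reacts to changes in the amount of dirty data. Most of
 * these knobs are exposed through sysfs elsewhere in bcache, not in this
 * file.
 */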