/*
 * bcache journalling code, for btree insertions
 *
 * Copyright 2012 Google, Inc.
 */

#include "bcache.h"
#include "btree.h"
#include "debug.h"

#include <trace/events/bcache.h>
/*
 * Journal replay/recovery:
 *
 * This code is all driven from run_cache_set(); we first read the journal
 * entries, do some other stuff, then we mark all the keys in the journal
 * entries (same as garbage collection would), then we replay them - reinserting
 * them into the cache in precisely the same order as they appear in the
 * journal.
 *
 * We only journal keys that go in leaf nodes, which simplifies things quite a
 * bit.
 */
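/*
 * In terms of the functions below, recovery runs roughly as:
 * bch_journal_read() collects journal_replay entries from disk,
 * bch_journal_mark() pins and marks the keys they reference, and
 * bch_journal_replay() reinserts them into the btree in order.
 */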
static void journal_read_endio(struct bio *bio, int error)
{
	struct closure *cl = bio->bi_private;

	/* Wake up the closure waiting in journal_read_bucket() */
	closure_put(cl);
}
static int journal_read_bucket(struct cache *ca, struct list_head *list,
			       unsigned bucket_index)
{
	struct journal_device *ja = &ca->journal;
	struct bio *bio = &ja->bio;

	struct journal_replay *i;
	struct jset *j, *data = ca->set->journal.w[0].data;
	struct closure cl;
	unsigned len, left, offset = 0;
	int ret = 0;
	sector_t bucket = bucket_to_sector(ca->set, ca->sb.d[bucket_index]);

	closure_init_stack(&cl);

	pr_debug("reading %llu", (uint64_t) bucket);
	while (offset < ca->sb.bucket_size) {
reread:		left = ca->sb.bucket_size - offset;
		len = min_t(unsigned, left, PAGE_SECTORS * 8);

		bio_reset(bio);
		bio->bi_iter.bi_sector	= bucket + offset;
		bio->bi_bdev		= ca->bdev;
		bio->bi_rw		= READ;
		bio->bi_iter.bi_size	= len << 9;

		bio->bi_end_io		= journal_read_endio;
		bio->bi_private		= &cl;
		bch_bio_map(bio, data);

		closure_bio_submit(bio, &cl, ca);
		closure_sync(&cl);
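		/*
		 * We read in chunks of at most PAGE_SECTORS * 8 sectors; if
		 * the parse loop below finds that the next journal entry
		 * extends past what was read into the buffer, it jumps back
		 * to reread to re-read starting at that entry's offset.
		 */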
		/* This function could be simpler now since we no longer write
		 * journal entries that overlap bucket boundaries; this means
		 * the start of a bucket will always have a valid journal entry
		 * if it has any journal entries at all.
		 */
		j = data;
		while (len) {
			struct list_head *where;
			size_t blocks, bytes = set_bytes(j);

			if (j->magic != jset_magic(&ca->sb))
				return ret;

			/* Entry too big to be valid for this bucket */
			if (bytes > left << 9)
				return ret;

			/* Entry extends past what we read; reread it whole */
			if (bytes > len << 9)
				goto reread;

			if (j->csum != csum_set(j))
				return ret;

			blocks = set_blocks(j, ca->set);
			while (!list_empty(list)) {
				i = list_first_entry(list,
					struct journal_replay, list);
				/* Drop entries this one's last_seq obsoletes */
				if (i->j.seq >= j->last_seq)
					break;
				list_del(&i->list);
				kfree(i);
			}

			list_for_each_entry_reverse(i, list, list) {
				if (j->seq == i->j.seq)
					goto next_set;

				if (j->seq < i->j.last_seq)
					goto next_set;

				if (j->seq > i->j.seq) {
					where = &i->list;
					goto add;
				}
			}

			where = list;
add:
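			/*
			 * Allocate a journal_replay for this jset and splice
			 * it in at 'where', keeping the list sorted by
			 * sequence number with duplicate and superseded
			 * entries skipped.
			 */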
			i = kmalloc(offsetof(struct journal_replay, j) +
				    bytes, GFP_KERNEL);
			if (!i)
				return -ENOMEM;
			memcpy(&i->j, j, bytes);
			list_add(&i->list, where);
			ret = 1;

			ja->seq[bucket_index] = j->seq;
next_set:
			offset	+= blocks * ca->sb.block_size;
			len	-= blocks * ca->sb.block_size;
			j = ((void *) j) + blocks * block_bytes(ca);
		}
	}

	return ret;
}
int bch_journal_read(struct cache_set *c, struct list_head *list)
{
#define read_bucket(b)							\
	({								\
		int ret = journal_read_bucket(ca, list, b);		\
		__set_bit(b, bitmap);					\
		if (ret < 0)						\
			return ret;					\
		ret;							\
	})
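/*
 * read_bucket() is a statement expression: it reads journal bucket b, marks
 * it as seen in the bitmap, propagates errors by returning from
 * bch_journal_read() itself, and otherwise evaluates to nonzero iff the
 * bucket contributed journal entries to the list.
 */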
	struct cache *ca;
	unsigned iter;

	for_each_cache(ca, c, iter) {
		struct journal_device *ja = &ca->journal;
		unsigned long bitmap[SB_JOURNAL_BUCKETS / BITS_PER_LONG];
		unsigned i, l, r, m;
		uint64_t seq;

		bitmap_zero(bitmap, SB_JOURNAL_BUCKETS);
		pr_debug("%u journal buckets", ca->sb.njournal_buckets);
		/*
		 * Read journal buckets ordered by golden ratio hash to quickly
		 * find a sequence of buckets with valid journal entries
		 */
		for (i = 0; i < ca->sb.njournal_buckets; i++) {
			l = (i * 2654435769U) % ca->sb.njournal_buckets;

			if (test_bit(l, bitmap))
				break;

			if (read_bucket(l))
				goto bsearch;
		}
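		/*
		 * The multiplier 2654435769 above is 2^32 / phi (the golden
		 * ratio), rounded - i.e. Fibonacci hashing, which scatters
		 * successive probes roughly evenly over the journal buckets.
		 */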
		/*
		 * If that fails, check all the buckets we haven't checked
		 * already
		 */
		pr_debug("falling back to linear search");

		for (l = find_first_zero_bit(bitmap, ca->sb.njournal_buckets);
		     l < ca->sb.njournal_buckets;
		     l = find_next_zero_bit(bitmap, ca->sb.njournal_buckets, l + 1))
			if (read_bucket(l))
				goto bsearch;
		/* No journal entries on this device? */
		if (list_empty(list))
			continue;
bsearch:
		/* Binary search */
		m = r = find_next_bit(bitmap, ca->sb.njournal_buckets, l + 1);
		pr_debug("starting binary search, l %u r %u", l, r);

		while (l + 1 < r) {
			seq = list_entry(list->prev, struct journal_replay,
					 list)->j.seq;

			m = (l + r) >> 1;
			read_bucket(m);

			if (seq != list_entry(list->prev, struct journal_replay,
					      list)->j.seq)
				l = m;
			else
				r = m;
		}
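		/*
		 * The binary search above narrows in on the edge of the
		 * region of buckets holding the newest entries: if reading
		 * bucket m advanced the newest sequence number on the list,
		 * the edge is above m, otherwise it is below.
		 */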
		/*
		 * Read buckets in reverse order until we stop finding more
		 * journal entries
		 */
		pr_debug("finishing up: m %u njournal_buckets %u",
			 m, ca->sb.njournal_buckets);
		l = m;

		while (1) {
			if (!l--)
				l = ca->sb.njournal_buckets - 1;

			if (l == m)
				break;

			if (test_bit(l, bitmap))
				continue;

			if (!read_bucket(l))
				break;
		}
		seq = 0;

		for (i = 0; i < ca->sb.njournal_buckets; i++)
			if (ja->seq[i] > seq) {
				seq = ja->seq[i];
				ja->cur_idx = ja->discard_idx =
					ja->last_idx = i;
			}
	}

	if (!list_empty(list))
		c->journal.seq = list_entry(list->prev,
					    struct journal_replay,
					    list)->j.seq;

	return 0;
#undef read_bucket
}
void bch_journal_mark(struct cache_set *c, struct list_head *list)
{
	atomic_t p = { 0 };
	struct bkey *k;
	struct journal_replay *i;
	struct journal *j = &c->journal;
	uint64_t last = j->seq;

	/*
	 * journal.pin should never fill up - we never write a journal
	 * entry when it would fill up. But if for some reason it does, we
	 * iterate over the list in reverse order so that we can just skip that
	 * refcount instead of bugging.
	 */
	list_for_each_entry_reverse(i, list, list) {
		BUG_ON(last < i->j.seq);
		i->pin = NULL;

		while (last-- != i->j.seq)
			if (fifo_free(&j->pin) > 1) {
				fifo_push_front(&j->pin, p);
				atomic_set(&fifo_front(&j->pin), 0);
			}

		if (fifo_free(&j->pin) > 1) {
			fifo_push_front(&j->pin, p);
			i->pin = &fifo_front(&j->pin);
			atomic_set(i->pin, 1);
		}
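		/*
		 * At this point every sequence number from i->j.seq up to
		 * j->seq has a slot in the pin fifo: gaps (entries we don't
		 * have) get refcount 0, entries we will replay get refcount
		 * 1 so they're held until bch_journal_replay() drops them.
		 */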
		for (k = i->j.start;
		     k < end(&i->j);
		     k = bkey_next(k)) {
			unsigned j;

			for (j = 0; j < KEY_PTRS(k); j++) {
				struct bucket *g = PTR_BUCKET(c, k, j);
				atomic_inc(&g->pin);

				if (g->prio == BTREE_PRIO &&
				    !ptr_stale(c, k, j))
					g->prio = INITIAL_PRIO;
			}

			__bch_btree_mark_key(c, 0, k);
		}
	}
}
int bch_journal_replay(struct cache_set *s, struct list_head *list)
{
	int ret = 0, keys = 0, entries = 0;
	struct bkey *k;
	struct journal_replay *i =
		list_entry(list->prev, struct journal_replay, list);

	uint64_t start = i->j.last_seq, end = i->j.seq, n = start;
	struct keylist keylist;

	bch_keylist_init(&keylist);
	list_for_each_entry(i, list, list) {
		BUG_ON(i->pin && atomic_read(i->pin) != 1);

		cache_set_err_on(n != i->j.seq, s,
"bcache: journal entries %llu-%llu missing! (replaying %llu-%llu)",
				 n, i->j.seq - 1, start, end);

		for (k = i->j.start;
		     k < end(&i->j);
		     k = bkey_next(k)) {
			trace_bcache_journal_replay_key(k);

			bkey_copy(keylist.top, k);
			bch_keylist_push(&keylist);

			ret = bch_btree_insert(s, &keylist, i->pin, NULL);
			if (ret)
				goto err;

			BUG_ON(!bch_keylist_empty(&keylist));
			keys++;
		}

		if (i->pin)
			atomic_dec(i->pin);
		n = i->j.seq + 1;
		entries++;
	}
	pr_info("journal replay done, %i keys in %i entries, seq %llu",
		keys, entries, end);
err:
	while (!list_empty(list)) {
		i = list_first_entry(list, struct journal_replay, list);
		list_del(&i->list);
		kfree(i);
	}

	return ret;
}
/* Journalling */

static void btree_flush_write(struct cache_set *c)
{
	/*
	 * Try to find the btree node that references the oldest journal
	 * entry; best is our current candidate and is locked if non NULL:
	 */
	struct btree *b, *best;
	unsigned i;
retry:
	best = NULL;

	for_each_cached_btree(b, c, i)
		if (btree_current_write(b)->journal) {
			if (!best)
				best = b;
			else if (journal_pin_cmp(c,
					btree_current_write(best)->journal,
					btree_current_write(b)->journal)) {
				best = b;
			}
		}

	b = best;
	if (b) {
		rw_lock(true, b, b->level);

		if (!btree_current_write(b)->journal) {
			rw_unlock(true, b);
			/* We raced */
			goto retry;
		}

		bch_btree_node_write(b, NULL);
		rw_unlock(true, b);
	}
}
#define last_seq(j)	((j)->seq - fifo_used(&(j)->pin) + 1)
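/*
 * last_seq() follows from the pin fifo having one entry per open journal
 * entry: e.g. with j->seq == 10 and fifo_used() == 3, entries 8, 9 and 10
 * are still pinned, so the oldest sequence number that must be kept is 8.
 */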
static void journal_discard_endio(struct bio *bio, int error)
{
	struct journal_device *ja =
		container_of(bio, struct journal_device, discard_bio);
	struct cache *ca = container_of(ja, struct cache, journal);

	atomic_set(&ja->discard_in_flight, DISCARD_DONE);

	closure_wake_up(&ca->set->journal.wait);
	closure_put(&ca->set->cl);
}
static void journal_discard_work(struct work_struct *work)
{
	struct journal_device *ja =
		container_of(work, struct journal_device, discard_work);

	submit_bio(0, &ja->discard_bio);
}
static void do_journal_discard(struct cache *ca)
{
	struct journal_device *ja = &ca->journal;
	struct bio *bio = &ja->discard_bio;

	if (!ca->discard) {
		ja->discard_idx = ja->last_idx;
		return;
	}

	switch (atomic_read(&ja->discard_in_flight)) {
	case DISCARD_IN_FLIGHT:
		return;

	case DISCARD_DONE:
		ja->discard_idx = (ja->discard_idx + 1) %
			ca->sb.njournal_buckets;

		atomic_set(&ja->discard_in_flight, DISCARD_READY);
		/* fallthrough */

	case DISCARD_READY:
		if (ja->discard_idx == ja->last_idx)
			return;
		atomic_set(&ja->discard_in_flight, DISCARD_IN_FLIGHT);

		bio_init(bio);
		bio->bi_iter.bi_sector	= bucket_to_sector(ca->set,
						ca->sb.d[ja->discard_idx]);
		bio->bi_bdev		= ca->bdev;
		bio->bi_rw		= REQ_WRITE|REQ_DISCARD;
		bio->bi_max_vecs	= 1;
		bio->bi_io_vec		= bio->bi_inline_vecs;
		bio->bi_iter.bi_size	= bucket_bytes(ca);
		bio->bi_end_io		= journal_discard_endio;

		closure_get(&ca->set->cl);
		INIT_WORK(&ja->discard_work, journal_discard_work);
		schedule_work(&ja->discard_work);
	}
}
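/*
 * The discard_in_flight state machine above cycles
 * DISCARD_READY -> DISCARD_IN_FLIGHT -> DISCARD_DONE and back: one discard
 * is issued at a time, and discard_idx only advances once the endio handler
 * has marked the previous discard done.
 */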
static void journal_reclaim(struct cache_set *c)
{
	struct bkey *k = &c->journal.key;
	struct cache *ca;
	uint64_t last_seq;
	unsigned iter, n = 0;
	atomic_t p;

	while (!atomic_read(&fifo_front(&c->journal.pin)))
		fifo_pop(&c->journal.pin, p);

	last_seq = last_seq(&c->journal);
	/* Update last_idx */

	for_each_cache(ca, c, iter) {
		struct journal_device *ja = &ca->journal;

		while (ja->last_idx != ja->cur_idx &&
		       ja->seq[ja->last_idx] < last_seq)
			ja->last_idx = (ja->last_idx + 1) %
				ca->sb.njournal_buckets;
	}

	for_each_cache(ca, c, iter)
		do_journal_discard(ca);
	if (c->journal.blocks_free)
		goto out;

	/*
	 * Allocate:
	 * XXX: Sort by free journal space
	 */

	for_each_cache(ca, c, iter) {
		struct journal_device *ja = &ca->journal;
		unsigned next = (ja->cur_idx + 1) % ca->sb.njournal_buckets;

		/* No space available on this device */
		if (next == ja->discard_idx)
			continue;

		ja->cur_idx = next;
		k->ptr[n++] = PTR(0,
				  bucket_to_sector(c, ca->sb.d[ja->cur_idx]),
				  ca->sb.nr_this_dev);
	}

	bkey_init(k);
	SET_KEY_PTRS(k, n);

	if (n)
		c->journal.blocks_free = c->sb.bucket_size >> c->block_bits;
out:
	if (!journal_full(&c->journal))
		__closure_wake_up(&c->journal.wait);
}
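/*
 * Per device the journal buckets form a ring: last_idx trails the oldest
 * bucket that still holds live entries, discard_idx chases it with discards,
 * and cur_idx is the bucket currently being written. A new bucket can only
 * be claimed while cur_idx + 1 != discard_idx, i.e. while discarding has
 * caught up.
 */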
void bch_journal_next(struct journal *j)
{
	atomic_t p = { 1 };

	j->cur = (j->cur == j->w)
		? &j->w[1]
		: &j->w[0];

	/*
	 * The fifo_push() needs to happen at the same time as j->seq is
	 * incremented for last_seq() to be calculated correctly
	 */
	BUG_ON(!fifo_push(&j->pin, p));
	atomic_set(&fifo_back(&j->pin), 1);

	j->cur->data->seq	= ++j->seq;
	j->cur->need_write	= false;
	j->cur->data->keys	= 0;

	if (fifo_full(&j->pin))
		pr_debug("journal_pin full (%zu)", fifo_used(&j->pin));
}
static void journal_write_endio(struct bio *bio, int error)
{
	struct journal_write *w = bio->bi_private;

	cache_set_err_on(error, w->c, "journal io error");
	closure_put(&w->c->journal.io);
}
static void journal_write(struct closure *);

static void journal_write_done(struct closure *cl)
{
	struct journal *j = container_of(cl, struct journal, io);
	struct journal_write *w = (j->cur == j->w)
		? &j->w[1]
		: &j->w[0];

	__closure_wake_up(&w->wait);
	continue_at_nobarrier(cl, journal_write, system_wq);
}
static void journal_write_unlocked(struct closure *cl)
	__releases(c->journal.lock)
{
	struct cache_set *c = container_of(cl, struct cache_set, journal.io);
	struct cache *ca;
	struct journal_write *w = c->journal.cur;
	struct bkey *k = &c->journal.key;
	unsigned i, sectors = set_blocks(w->data, c) * c->sb.block_size;

	struct bio *bio;
	struct bio_list list;
	bio_list_init(&list);
	if (!w->need_write) {
		/*
		 * XXX: have to unlock closure before we unlock journal lock,
		 * else we race with bch_journal(). But this way we race
		 * against cache set unregister. Doh.
		 */
		set_closure_fn(cl, NULL, NULL);
		closure_sub(cl, CLOSURE_RUNNING + 1);
		spin_unlock(&c->journal.lock);
		return;
	} else if (journal_full(&c->journal)) {
		journal_reclaim(c);
		spin_unlock(&c->journal.lock);

		btree_flush_write(c);
		continue_at(cl, journal_write, system_wq);
	}
	c->journal.blocks_free -= set_blocks(w->data, c);

	w->data->btree_level = c->root->level;

	bkey_copy(&w->data->btree_root, &c->root->key);
	bkey_copy(&w->data->uuid_bucket, &c->uuid_bucket);

	for_each_cache(ca, c, i)
		w->data->prio_bucket[ca->sb.nr_this_dev] = ca->prio_buckets[0];

	w->data->magic		= jset_magic(&c->sb);
	w->data->version	= BCACHE_JSET_VERSION;
	w->data->last_seq	= last_seq(&c->journal);
	w->data->csum		= csum_set(w->data);
	for (i = 0; i < KEY_PTRS(k); i++) {
		ca = PTR_CACHE(c, k, i);
		bio = &ca->journal.bio;

		atomic_long_add(sectors, &ca->meta_sectors_written);

		bio_reset(bio);
		bio->bi_iter.bi_sector	= PTR_OFFSET(k, i);
		bio->bi_bdev		= ca->bdev;
		bio->bi_rw		= REQ_WRITE|REQ_SYNC|REQ_META|REQ_FLUSH|REQ_FUA;
		bio->bi_iter.bi_size	= sectors << 9;

		bio->bi_end_io		= journal_write_endio;
		bio->bi_private		= w;
		bch_bio_map(bio, w->data);

		trace_bcache_journal_write(bio);
		bio_list_add(&list, bio);

		SET_PTR_OFFSET(k, i, PTR_OFFSET(k, i) + sectors);

		ca->journal.seq[ca->journal.cur_idx] = w->data->seq;
	}
	atomic_dec_bug(&fifo_back(&c->journal.pin));
	bch_journal_next(&c->journal);
	journal_reclaim(c);

	spin_unlock(&c->journal.lock);

	while ((bio = bio_list_pop(&list)))
		closure_bio_submit(bio, cl, c->cache[0]);

	continue_at(cl, journal_write_done, NULL);
}
static void journal_write(struct closure *cl)
{
	struct cache_set *c = container_of(cl, struct cache_set, journal.io);

	spin_lock(&c->journal.lock);
	journal_write_unlocked(cl);
}
static void journal_try_write(struct cache_set *c)
	__releases(c->journal.lock)
{
	struct closure *cl = &c->journal.io;
	struct journal_write *w = c->journal.cur;

	w->need_write = true;

	if (closure_trylock(cl, &c->cl))
		journal_write_unlocked(cl);
	else
		spin_unlock(&c->journal.lock);
}
static struct journal_write *journal_wait_for_write(struct cache_set *c,
						    unsigned nkeys)
{
	size_t sectors;
	struct closure cl;

	closure_init_stack(&cl);

	spin_lock(&c->journal.lock);

	while (1) {
		struct journal_write *w = c->journal.cur;

		sectors = __set_blocks(w->data, w->data->keys + nkeys,
				       c) * c->sb.block_size;

		if (sectors <= min_t(size_t,
				     c->journal.blocks_free * c->sb.block_size,
				     PAGE_SECTORS << JSET_BITS))
			return w;
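		/*
		 * The check above sizes the jset as it would be with the new
		 * keys added, and requires it to fit both the space left in
		 * the current journal bucket and the maximum jset size
		 * (PAGE_SECTORS << JSET_BITS sectors); otherwise we either
		 * flush the current write or reclaim, below.
		 */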
		/* XXX: tracepoint */
		if (!journal_full(&c->journal)) {
			trace_bcache_journal_entry_full(c);

			/*
			 * XXX: If we were inserting so many keys that they
			 * won't fit in an _empty_ journal write, we'll
			 * deadlock. For now, handle this in
			 * bch_keylist_realloc() - but something to think about.
			 */
			BUG_ON(!w->data->keys);

			closure_wait(&w->wait, &cl);
			journal_try_write(c); /* unlocks */
		} else {
			trace_bcache_journal_full(c);

			closure_wait(&c->journal.wait, &cl);
			journal_reclaim(c);
			spin_unlock(&c->journal.lock);

			btree_flush_write(c);
		}

		closure_sync(&cl);
		spin_lock(&c->journal.lock);
	}
}
static void journal_write_work(struct work_struct *work)
{
	struct cache_set *c = container_of(to_delayed_work(work),
					   struct cache_set,
					   journal.work);
	spin_lock(&c->journal.lock);
	journal_try_write(c);
}
/*
 * Entry point to the journalling code - bio_insert() and btree_invalidate()
 * pass bch_journal() a list of keys to be journalled, and then
 * bch_journal() hands those same keys off to btree_insert_async()
 */
atomic_t *bch_journal(struct cache_set *c,
		      struct keylist *keys,
		      struct closure *parent)
{
	struct journal_write *w;
	atomic_t *ret;

	if (!CACHE_SYNC(&c->sb))
		return NULL;

	w = journal_wait_for_write(c, bch_keylist_nkeys(keys));

	memcpy(end(w->data), keys->keys, bch_keylist_bytes(keys));
	w->data->keys += bch_keylist_nkeys(keys);

	ret = &fifo_back(&c->journal.pin);
	atomic_inc(ret);

	if (parent) {
		closure_wait(&w->wait, parent);
		journal_try_write(c);
	} else if (!w->need_write) {
		schedule_delayed_work(&c->journal.work,
				      msecs_to_jiffies(c->journal_delay_ms));
		spin_unlock(&c->journal.lock);
	} else {
		spin_unlock(&c->journal.lock);
	}

	return ret;
}
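/*
 * Note the return value: a pointer into the journal pin fifo, with its
 * refcount already incremented. The caller holds it across the btree insert
 * and drops it once the keys are persisted in the btree, which is what lets
 * journal_reclaim() eventually free the journal bucket.
 */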
void bch_journal_meta(struct cache_set *c, struct closure *cl)
{
	struct keylist keys;
	atomic_t *ref;

	bch_keylist_init(&keys);

	ref = bch_journal(c, &keys, cl);
	if (ref)
		atomic_dec_bug(ref);
}
void bch_journal_free(struct cache_set *c)
{
	free_pages((unsigned long) c->journal.w[1].data, JSET_BITS);
	free_pages((unsigned long) c->journal.w[0].data, JSET_BITS);
	free_fifo(&c->journal.pin);
}
int bch_journal_alloc(struct cache_set *c)
{
	struct journal *j = &c->journal;

	closure_init_unlocked(&j->io);
	spin_lock_init(&j->lock);
	INIT_DELAYED_WORK(&j->work, journal_write_work);

	c->journal_delay_ms = 100;

	j->w[0].c = c;
	j->w[1].c = c;

	if (!(init_fifo(&j->pin, JOURNAL_PIN, GFP_KERNEL)) ||
	    !(j->w[0].data = (void *) __get_free_pages(GFP_KERNEL, JSET_BITS)) ||
	    !(j->w[1].data = (void *) __get_free_pages(GFP_KERNEL, JSET_BITS)))
		return -ENOMEM;

	return 0;
}