// SPDX-License-Identifier: GPL-2.0
/*
 * bcache journalling code, for btree insertions
 *
 * Copyright 2012 Google, Inc.
 */

#include <trace/events/bcache.h>

/*
 * Journal replay/recovery:
 *
 * This code is all driven from run_cache_set(); we first read the journal
 * entries, do some other stuff, then we mark all the keys in the journal
 * entries (same as garbage collection would), then we replay them - reinserting
 * them into the cache in precisely the same order as they appear in the
 * We only journal keys that go in leaf nodes, which simplifies things quite a
 * bit.
 */
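
/*
 * Illustrative sketch (not part of the original file): the rough recovery
 * sequence as driven from run_cache_set() in super.c, using the entry points
 * defined below.  Error handling and unrelated setup steps are omitted, and
 * journal_recovery_sketch() itself is a hypothetical wrapper, not a real
 * function.
 */
#if 0
static int journal_recovery_sketch(struct cache_set *c)
{
	LIST_HEAD(journal);	/* list of struct journal_replay */
	int ret;

	/* Read every journal bucket on every cache device into @journal */
	ret = bch_journal_read(c, &journal);
	if (ret)
		return ret;

	/* Mark/pin the buckets referenced by journalled keys, like GC would */
	bch_journal_mark(c, &journal);

	/* Re-insert the keys into the btree, oldest entry first */
	return bch_journal_replay(c, &journal);
}
#endif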
static void journal_read_endio(struct bio *bio)
	struct closure *cl = bio->bi_private;

static int journal_read_bucket(struct cache *ca, struct list_head *list,
			       unsigned int bucket_index)
	struct journal_device *ja = &ca->journal;
	struct bio *bio = &ja->bio;
	struct journal_replay *i;
	struct jset *j, *data = ca->set->journal.w[0].data;
	unsigned int len, left, offset = 0;
	sector_t bucket = bucket_to_sector(ca->set, ca->sb.d[bucket_index]);

	closure_init_stack(&cl);

	pr_debug("reading %u", bucket_index);

	while (offset < ca->sb.bucket_size) {
reread:		left = ca->sb.bucket_size - offset;
		len = min_t(unsigned int, left, PAGE_SECTORS << JSET_BITS);

		bio->bi_iter.bi_sector = bucket + offset;
		bio_set_dev(bio, ca->bdev);
		bio->bi_iter.bi_size = len << 9;
		bio->bi_end_io = journal_read_endio;
		bio->bi_private = &cl;
		bio_set_op_attrs(bio, REQ_OP_READ, 0);
		bch_bio_map(bio, data);

		closure_bio_submit(ca->set, bio, &cl);

		/* This function could be simpler now since we no longer write
		 * journal entries that overlap bucket boundaries; this means
		 * the start of a bucket will always have a valid journal entry
		 * if it has any journal entries at all.
		 */
			struct list_head *where;
			size_t blocks, bytes = set_bytes(j);

			if (j->magic != jset_magic(&ca->sb)) {
				pr_debug("%u: bad magic", bucket_index);

			if (bytes > left << 9 ||
			    bytes > PAGE_SIZE << JSET_BITS) {
				pr_info("%u: too big, %zu bytes, offset %u",
					bucket_index, bytes, offset);

			if (j->csum != csum_set(j)) {
				pr_info("%u: bad csum, %zu bytes, offset %u",
					bucket_index, bytes, offset);

			blocks = set_blocks(j, block_bytes(ca->set));

			while (!list_empty(list)) {
				i = list_first_entry(list,
					struct journal_replay, list);
				if (i->j.seq >= j->last_seq)

			list_for_each_entry_reverse(i, list, list) {
				if (j->seq == i->j.seq)
				if (j->seq < i->j.last_seq)
				if (j->seq > i->j.seq) {

			i = kmalloc(offsetof(struct journal_replay, j) +
			memcpy(&i->j, j, bytes);
			list_add(&i->list, where);

			ja->seq[bucket_index] = j->seq;

			offset += blocks * ca->sb.block_size;
			len -= blocks * ca->sb.block_size;
			j = ((void *) j) + blocks * block_bytes(ca);
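
/*
 * Illustrative sketch (not part of the original file): the per-jset
 * validation the loop above performs before accepting an entry.
 * jset_looks_valid() is a hypothetical helper; the field accessors and
 * constants are the ones used above.
 */
#if 0
static bool jset_looks_valid(struct cache *ca, struct jset *j,
			     unsigned int left_sectors)
{
	size_t bytes = set_bytes(j);

	if (j->magic != jset_magic(&ca->sb))
		return false;		/* not written by this cache set */

	if (bytes > left_sectors << 9 ||
	    bytes > PAGE_SIZE << JSET_BITS)
		return false;		/* larger than what was read */

	if (j->csum != csum_set(j))
		return false;		/* corrupted on disk */

	return true;
}
#endif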
int bch_journal_read(struct cache_set *c, struct list_head *list)
#define read_bucket(b)							\
		int ret = journal_read_bucket(ca, list, b);		\
		__set_bit(b, bitmap);					\

	for_each_cache(ca, c, iter) {
		struct journal_device *ja = &ca->journal;
		DECLARE_BITMAP(bitmap, SB_JOURNAL_BUCKETS);
		unsigned int i, l, r, m;

		bitmap_zero(bitmap, SB_JOURNAL_BUCKETS);
		pr_debug("%u journal buckets", ca->sb.njournal_buckets);

		/*
		 * Read journal buckets ordered by golden ratio hash to quickly
		 * find a sequence of buckets with valid journal entries
		 */
		for (i = 0; i < ca->sb.njournal_buckets; i++) {
			/*
			 * We must try index l == 0 first, for correctness:
			 * the journal buckets form a circular buffer which
			 * might have wrapped around
			 */
			l = (i * 2654435769U) % ca->sb.njournal_buckets;
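			/*
			 * Illustrative note (not in the original): 2654435769
			 * is 0x9e3779b9, roughly 2^32 divided by the golden
			 * ratio (the Fibonacci hashing constant), which is
			 * what the "golden ratio hash" in the comment above
			 * refers to.
			 */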
			if (test_bit(l, bitmap))

		/*
		 * If that fails, check all the buckets we haven't checked
		 */
		pr_debug("falling back to linear search");

		for (l = find_first_zero_bit(bitmap, ca->sb.njournal_buckets);
		     l < ca->sb.njournal_buckets;
		     l = find_next_zero_bit(bitmap, ca->sb.njournal_buckets,

		/* no journal entries on this device? */
		if (l == ca->sb.njournal_buckets)

		BUG_ON(list_empty(list));

		r = find_next_bit(bitmap, ca->sb.njournal_buckets, l + 1);
		pr_debug("starting binary search, l %u r %u", l, r);

			seq = list_entry(list->prev, struct journal_replay,

			if (seq != list_entry(list->prev, struct journal_replay,

		/*
		 * Read buckets in reverse order until we stop finding more
		 */
		pr_debug("finishing up: m %u njournal_buckets %u",
			 m, ca->sb.njournal_buckets);

				l = ca->sb.njournal_buckets - 1;

			if (test_bit(l, bitmap))

		for (i = 0; i < ca->sb.njournal_buckets; i++)
			if (ja->seq[i] > seq) {
				/*
				 * When journal_reclaim() goes to allocate for
				 * the first time, it'll use the bucket after
				 * ja->cur_idx
				 */
				ja->last_idx = ja->discard_idx = (i + 1) %
					ca->sb.njournal_buckets;
	if (!list_empty(list))
		c->journal.seq = list_entry(list->prev,
					    struct journal_replay,

void bch_journal_mark(struct cache_set *c, struct list_head *list)
	struct journal_replay *i;
	struct journal *j = &c->journal;
	uint64_t last = j->seq;

	/*
	 * journal.pin should never fill up - we never write a journal
	 * entry when it would fill up. But if for some reason it does, we
	 * iterate over the list in reverse order so that we can just skip that
	 * refcount instead of bugging.
	 */
	list_for_each_entry_reverse(i, list, list) {
		BUG_ON(last < i->j.seq);

		while (last-- != i->j.seq)
			if (fifo_free(&j->pin) > 1) {
				fifo_push_front(&j->pin, p);
				atomic_set(&fifo_front(&j->pin), 0);

		if (fifo_free(&j->pin) > 1) {
			fifo_push_front(&j->pin, p);
			i->pin = &fifo_front(&j->pin);
			atomic_set(i->pin, 1);

		     k < bset_bkey_last(&i->j);
			if (!__bch_extent_invalid(c, k)) {
				for (j = 0; j < KEY_PTRS(k); j++)
					if (ptr_available(c, k, j))
						atomic_inc(&PTR_BUCKET(c, k, j)->pin);

				bch_initial_mark_key(c, 0, k);
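
/*
 * Illustrative sketch (not part of the original file): the shape of the pin
 * seeding done above, without the fifo plumbing.  Walking the replay list
 * newest to oldest, every sequence number from j->seq down to the oldest
 * surviving entry gets a slot in journal.pin; sequence numbers whose entry
 * was lost get a placeholder count of 0, surviving entries get a count of 1
 * and remember their slot in i->pin so replay can release it later.
 * push_placeholder_pin() and push_entry_pin() are hypothetical stand-ins for
 * the fifo_push_front()/atomic_set() pairs above.
 */
#if 0
	list_for_each_entry_reverse(i, list, list) {
		while (last-- != i->j.seq)
			push_placeholder_pin(j);	/* refcount 0 */

		i->pin = push_entry_pin(j);		/* refcount 1 */
	}
#endif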
int bch_journal_replay(struct cache_set *s, struct list_head *list)
	int ret = 0, keys = 0, entries = 0;
	struct journal_replay *i =
		list_entry(list->prev, struct journal_replay, list);
	uint64_t start = i->j.last_seq, end = i->j.seq, n = start;
	struct keylist keylist;

	list_for_each_entry(i, list, list) {
		BUG_ON(i->pin && atomic_read(i->pin) != 1);

		cache_set_err_on(n != i->j.seq, s,
				 "bcache: journal entries %llu-%llu missing! (replaying %llu-%llu)",
				 n, i->j.seq - 1, start, end);

		     k < bset_bkey_last(&i->j);
			trace_bcache_journal_replay_key(k);

			bch_keylist_init_single(&keylist, k);

			ret = bch_btree_insert(s, &keylist, i->pin, NULL);

			BUG_ON(!bch_keylist_empty(&keylist));

	pr_info("journal replay done, %i keys in %i entries, seq %llu",

	while (!list_empty(list)) {
		i = list_first_entry(list, struct journal_replay, list);

#define journal_max_cmp(l, r) \
	(fifo_idx(&c->journal.pin, btree_current_write(l)->journal) < \
	 fifo_idx(&(c)->journal.pin, btree_current_write(r)->journal))
#define journal_min_cmp(l, r) \
	(fifo_idx(&c->journal.pin, btree_current_write(l)->journal) > \
	 fifo_idx(&(c)->journal.pin, btree_current_write(r)->journal))
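
/*
 * Illustrative note (not part of the original file): fifo_idx() of a btree
 * node's journal pin says how far from the front of journal.pin - i.e. how
 * far from the oldest un-reclaimed sequence number - the entry that node
 * still depends on is.  btree_flush_write() below scans with journal_max_cmp
 * to keep the candidates pinning the oldest entries, then re-heapifies with
 * journal_min_cmp so heap_pop() returns the node pinning the oldest journal
 * entry first.  A sketch of the intended pop order, assuming the heap has
 * already been filled:
 */
#if 0
	for (i = c->flush_btree.used / 2 - 1; i >= 0; --i)
		heap_sift(&c->flush_btree, i, journal_min_cmp);

	heap_pop(&c->flush_btree, b, journal_min_cmp);	/* b pins the oldest entry */
#endif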
static void btree_flush_write(struct cache_set *c)
	/*
	 * Try to find the btree nodes that reference the oldest journal
	 * entries: candidates are collected into the c->flush_btree heap,
	 * and the winner is popped, locked, and written out below.
	 */
	atomic_long_inc(&c->flush_write);

	spin_lock(&c->journal.lock);
	if (heap_empty(&c->flush_btree)) {
		for_each_cached_btree(b, c, i)
			if (btree_current_write(b)->journal) {
				if (!heap_full(&c->flush_btree))
					heap_add(&c->flush_btree, b,
				else if (journal_max_cmp(b,
					 heap_peek(&c->flush_btree))) {
					c->flush_btree.data[0] = b;
					heap_sift(&c->flush_btree, 0,

		for (i = c->flush_btree.used / 2 - 1; i >= 0; --i)
			heap_sift(&c->flush_btree, i, journal_min_cmp);

	heap_pop(&c->flush_btree, b, journal_min_cmp);
	spin_unlock(&c->journal.lock);

		mutex_lock(&b->write_lock);
		if (!btree_current_write(b)->journal) {
			mutex_unlock(&b->write_lock);
			atomic_long_inc(&c->retry_flush_write);

		__bch_btree_node_write(b, NULL);
		mutex_unlock(&b->write_lock);

#define last_seq(j)	((j)->seq - fifo_used(&(j)->pin) + 1)
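
/*
 * Illustrative example (not part of the original file): if j->seq is 110 and
 * fifo_used(&j->pin) is 11, then last_seq(j) is 110 - 11 + 1 = 100, i.e.
 * sequence numbers 100..110 are the entries that may still hold pins and
 * whose journal buckets must not be reused yet.
 */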
static void journal_discard_endio(struct bio *bio)
	struct journal_device *ja =
		container_of(bio, struct journal_device, discard_bio);
	struct cache *ca = container_of(ja, struct cache, journal);

	atomic_set(&ja->discard_in_flight, DISCARD_DONE);

	closure_wake_up(&ca->set->journal.wait);
	closure_put(&ca->set->cl);

static void journal_discard_work(struct work_struct *work)
	struct journal_device *ja =
		container_of(work, struct journal_device, discard_work);

	submit_bio(&ja->discard_bio);

static void do_journal_discard(struct cache *ca)
	struct journal_device *ja = &ca->journal;
	struct bio *bio = &ja->discard_bio;

		ja->discard_idx = ja->last_idx;

	switch (atomic_read(&ja->discard_in_flight)) {
	case DISCARD_IN_FLIGHT:

		ja->discard_idx = (ja->discard_idx + 1) %
			ca->sb.njournal_buckets;

		atomic_set(&ja->discard_in_flight, DISCARD_READY);

		if (ja->discard_idx == ja->last_idx)

		atomic_set(&ja->discard_in_flight, DISCARD_IN_FLIGHT);

		bio_init(bio, bio->bi_inline_vecs, 1);
		bio_set_op_attrs(bio, REQ_OP_DISCARD, 0);
		bio->bi_iter.bi_sector = bucket_to_sector(ca->set,
						ca->sb.d[ja->discard_idx]);
		bio_set_dev(bio, ca->bdev);
		bio->bi_iter.bi_size = bucket_bytes(ca);
		bio->bi_end_io = journal_discard_endio;

		closure_get(&ca->set->cl);
		INIT_WORK(&ja->discard_work, journal_discard_work);
		queue_work(bch_journal_wq, &ja->discard_work);
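
/*
 * Illustrative sketch (not part of the original file): the discard_in_flight
 * state machine driven above.  With ca->discard off, discard_idx simply
 * follows last_idx and no bios are issued.
 */
#if 0
	/* One trip around the states for a single reclaimed bucket: */
	atomic_set(&ja->discard_in_flight, DISCARD_IN_FLIGHT);	/* discard bio queued */
	/* ... journal_discard_endio() runs on completion ... */
	atomic_set(&ja->discard_in_flight, DISCARD_DONE);	/* bucket discarded */
	/* ... the next do_journal_discard() call advances discard_idx ... */
	atomic_set(&ja->discard_in_flight, DISCARD_READY);	/* bucket free for reuse */
#endif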
static void journal_reclaim(struct cache_set *c)
	struct bkey *k = &c->journal.key;
	unsigned int iter, n = 0;
	atomic_t p __maybe_unused;

	atomic_long_inc(&c->reclaim);

	while (!atomic_read(&fifo_front(&c->journal.pin)))
		fifo_pop(&c->journal.pin, p);

	last_seq = last_seq(&c->journal);

	/* Update last_idx */

	for_each_cache(ca, c, iter) {
		struct journal_device *ja = &ca->journal;

		while (ja->last_idx != ja->cur_idx &&
		       ja->seq[ja->last_idx] < last_seq)
			ja->last_idx = (ja->last_idx + 1) %
				ca->sb.njournal_buckets;

	for_each_cache(ca, c, iter)
		do_journal_discard(ca);

	if (c->journal.blocks_free)

	/*
	 * XXX: Sort by free journal space
	 */
	for_each_cache(ca, c, iter) {
		struct journal_device *ja = &ca->journal;
		unsigned int next = (ja->cur_idx + 1) % ca->sb.njournal_buckets;

		/* No space available on this device */
		if (next == ja->discard_idx)

		k->ptr[n++] = MAKE_PTR(0,
				  bucket_to_sector(c, ca->sb.d[ja->cur_idx]),

		c->journal.blocks_free = c->sb.bucket_size >> c->block_bits;

	if (!journal_full(&c->journal))
		__closure_wake_up(&c->journal.wait);
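
/*
 * Illustrative example (not part of the original file): suppose
 * last_seq(&c->journal) is 100 and a device's journal buckets starting at
 * ja->last_idx have newest sequence numbers { 97, 98, 99, 101, ... }.  The
 * last_idx loop above steps past the first three buckets - everything they
 * contain is older than 100 and therefore already safe in the btree - and
 * stops at the bucket whose newest entry is 101.  The skipped buckets become
 * eligible for do_journal_discard() and, once discarded, for reuse by new
 * journal writes.
 */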
void bch_journal_next(struct journal *j)
	j->cur = (j->cur == j->w)

	/*
	 * The fifo_push() needs to happen at the same time as j->seq is
	 * incremented for last_seq() to be calculated correctly
	 */
	BUG_ON(!fifo_push(&j->pin, p));
	atomic_set(&fifo_back(&j->pin), 1);

	j->cur->data->seq = ++j->seq;
	j->cur->dirty = false;
	j->cur->need_write = false;
	j->cur->data->keys = 0;

	if (fifo_full(&j->pin))
		pr_debug("journal_pin full (%zu)", fifo_used(&j->pin));
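
/*
 * Illustrative example (not part of the original file): because the
 * fifo_push() and the ++j->seq above happen together, opening a new journal
 * entry leaves last_seq() (seq - fifo_used + 1) unchanged - e.g. going from
 * seq 110 with 11 pins to seq 111 with 12 pins still yields last_seq == 100.
 */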
static void journal_write_endio(struct bio *bio)
	struct journal_write *w = bio->bi_private;

	cache_set_err_on(bio->bi_status, w->c, "journal io error");
	closure_put(&w->c->journal.io);

static void journal_write(struct closure *cl);

static void journal_write_done(struct closure *cl)
	struct journal *j = container_of(cl, struct journal, io);
	struct journal_write *w = (j->cur == j->w)

	__closure_wake_up(&w->wait);
	continue_at_nobarrier(cl, journal_write, bch_journal_wq);

static void journal_write_unlock(struct closure *cl)
	__releases(&c->journal.lock)
	struct cache_set *c = container_of(cl, struct cache_set, journal.io);

	c->journal.io_in_flight = 0;
	spin_unlock(&c->journal.lock);

static void journal_write_unlocked(struct closure *cl)
	__releases(c->journal.lock)
	struct cache_set *c = container_of(cl, struct cache_set, journal.io);
	struct journal_write *w = c->journal.cur;
	struct bkey *k = &c->journal.key;
	unsigned int i, sectors = set_blocks(w->data, block_bytes(c)) *
	struct bio_list list;

	bio_list_init(&list);

	if (!w->need_write) {
		closure_return_with_destructor(cl, journal_write_unlock);
	} else if (journal_full(&c->journal)) {
		spin_unlock(&c->journal.lock);

		btree_flush_write(c);
		continue_at(cl, journal_write, bch_journal_wq);

	c->journal.blocks_free -= set_blocks(w->data, block_bytes(c));

	w->data->btree_level = c->root->level;

	bkey_copy(&w->data->btree_root, &c->root->key);
	bkey_copy(&w->data->uuid_bucket, &c->uuid_bucket);

	for_each_cache(ca, c, i)
		w->data->prio_bucket[ca->sb.nr_this_dev] = ca->prio_buckets[0];

	w->data->magic = jset_magic(&c->sb);
	w->data->version = BCACHE_JSET_VERSION;
	w->data->last_seq = last_seq(&c->journal);
	w->data->csum = csum_set(w->data);

	for (i = 0; i < KEY_PTRS(k); i++) {
		ca = PTR_CACHE(c, k, i);
		bio = &ca->journal.bio;

		atomic_long_add(sectors, &ca->meta_sectors_written);

		bio->bi_iter.bi_sector = PTR_OFFSET(k, i);
		bio_set_dev(bio, ca->bdev);
		bio->bi_iter.bi_size = sectors << 9;
		bio->bi_end_io = journal_write_endio;
		bio_set_op_attrs(bio, REQ_OP_WRITE,
				 REQ_SYNC|REQ_META|REQ_PREFLUSH|REQ_FUA);
		bch_bio_map(bio, w->data);

		trace_bcache_journal_write(bio);
		bio_list_add(&list, bio);

		SET_PTR_OFFSET(k, i, PTR_OFFSET(k, i) + sectors);

		ca->journal.seq[ca->journal.cur_idx] = w->data->seq;

	atomic_dec_bug(&fifo_back(&c->journal.pin));
	bch_journal_next(&c->journal);

	spin_unlock(&c->journal.lock);

	while ((bio = bio_list_pop(&list)))
		closure_bio_submit(c, bio, cl);

	continue_at(cl, journal_write_done, NULL);

static void journal_write(struct closure *cl)
	struct cache_set *c = container_of(cl, struct cache_set, journal.io);

	spin_lock(&c->journal.lock);
	journal_write_unlocked(cl);

static void journal_try_write(struct cache_set *c)
	__releases(c->journal.lock)
	struct closure *cl = &c->journal.io;
	struct journal_write *w = c->journal.cur;

	w->need_write = true;

	if (!c->journal.io_in_flight) {
		c->journal.io_in_flight = 1;
		closure_call(cl, journal_write_unlocked, NULL, &c->cl);
		spin_unlock(&c->journal.lock);

static struct journal_write *journal_wait_for_write(struct cache_set *c,
	__acquires(&c->journal.lock)
	closure_init_stack(&cl);

	spin_lock(&c->journal.lock);

		struct journal_write *w = c->journal.cur;

		sectors = __set_blocks(w->data, w->data->keys + nkeys,
				       block_bytes(c)) * c->sb.block_size;

		if (sectors <= min_t(size_t,
				     c->journal.blocks_free * c->sb.block_size,
				     PAGE_SECTORS << JSET_BITS))

			closure_wait(&c->journal.wait, &cl);

		if (!journal_full(&c->journal)) {
				trace_bcache_journal_entry_full(c);

			/*
			 * XXX: If we were inserting so many keys that they
			 * won't fit in an _empty_ journal write, we'll
			 * deadlock. For now, handle this in
			 * bch_keylist_realloc() - but something to think about.
			 */
			BUG_ON(!w->data->keys);

			journal_try_write(c); /* unlocks */
				trace_bcache_journal_full(c);

			spin_unlock(&c->journal.lock);

			btree_flush_write(c);

		spin_lock(&c->journal.lock);

static void journal_write_work(struct work_struct *work)
	struct cache_set *c = container_of(to_delayed_work(work),

	spin_lock(&c->journal.lock);
	if (c->journal.cur->dirty)
		journal_try_write(c);
		spin_unlock(&c->journal.lock);
/*
 * Entry point to the journalling code - bio_insert() and btree_invalidate()
 * pass bch_journal() a list of keys to be journalled, and then
 * bch_journal() hands those same keys off to btree_insert_async()
 */
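
/*
 * Illustrative sketch (not part of the original file): a minimal caller,
 * mirroring bch_journal_meta() below.  journal_some_keys() is hypothetical;
 * bch_journal() and atomic_dec_bug() are the real interfaces used in this
 * file.
 */
#if 0
static void journal_some_keys(struct cache_set *c, struct keylist *keys,
			      struct closure *cl)
{
	/* cl is woken once the journal entry containing these keys is stable */
	atomic_t *ref = bch_journal(c, keys, cl);

	/*
	 * ref is the journal pin for that entry (NULL if the cache set is not
	 * running in CACHE_SYNC mode).  bch_journal_meta() drops it right
	 * away; the data insert path instead holds it until bch_btree_insert()
	 * has put the keys into the btree.
	 */
	if (ref)
		atomic_dec_bug(ref);
}
#endif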
atomic_t *bch_journal(struct cache_set *c,
		      struct keylist *keys,
		      struct closure *parent)
	struct journal_write *w;

	if (!CACHE_SYNC(&c->sb))

	w = journal_wait_for_write(c, bch_keylist_nkeys(keys));

	memcpy(bset_bkey_last(w->data), keys->keys, bch_keylist_bytes(keys));
	w->data->keys += bch_keylist_nkeys(keys);

	ret = &fifo_back(&c->journal.pin);

		closure_wait(&w->wait, parent);
		journal_try_write(c);
	} else if (!w->dirty) {
		schedule_delayed_work(&c->journal.work,
				      msecs_to_jiffies(c->journal_delay_ms));
		spin_unlock(&c->journal.lock);

		spin_unlock(&c->journal.lock);

void bch_journal_meta(struct cache_set *c, struct closure *cl)
	bch_keylist_init(&keys);

	ref = bch_journal(c, &keys, cl);

void bch_journal_free(struct cache_set *c)
	free_pages((unsigned long) c->journal.w[1].data, JSET_BITS);
	free_pages((unsigned long) c->journal.w[0].data, JSET_BITS);
	free_fifo(&c->journal.pin);
	free_heap(&c->flush_btree);

int bch_journal_alloc(struct cache_set *c)
	struct journal *j = &c->journal;

	spin_lock_init(&j->lock);
	INIT_DELAYED_WORK(&j->work, journal_write_work);

	c->journal_delay_ms = 100;

	if (!(init_heap(&c->flush_btree, 128, GFP_KERNEL)) ||
	    !(init_fifo(&j->pin, JOURNAL_PIN, GFP_KERNEL)) ||
	    !(j->w[0].data = (void *) __get_free_pages(GFP_KERNEL, JSET_BITS)) ||
	    !(j->w[1].data = (void *) __get_free_pages(GFP_KERNEL, JSET_BITS)))