// SPDX-License-Identifier: GPL-2.0

#include "btree_key_cache.h"
#include "btree_update.h"
#include "btree_write_buffer.h"
#include "journal_io.h"
#include "journal_reclaim.h"
#include "sb-members.h"

#include <linux/kthread.h>
#include <linux/sched/mm.h>

/* Free space calculations: */

static unsigned journal_space_from(struct journal_device *ja,
                                   enum journal_space_from from)
        case journal_space_discarded:
                return ja->discard_idx;
        case journal_space_clean_ondisk:
                return ja->dirty_idx_ondisk;
        case journal_space_clean:

unsigned bch2_journal_dev_buckets_available(struct journal *j,
                                            struct journal_device *ja,
                                            enum journal_space_from from)
        unsigned available = (journal_space_from(ja, from) -
                              ja->cur_idx - 1 + ja->nr) % ja->nr;
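
        /*
         * Worked example (illustrative numbers only): with ja->nr = 8,
         * cur_idx = 6 and journal_space_from() returning 2, this is
         * (2 - 6 - 1 + 8) % 8 = 3 - the buckets strictly between cur_idx
         * and the 'from' index, walking forward around the ring.
         */
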
         * Don't use the last bucket unless writing the new last_seq
         * will make another bucket available:
        if (available && ja->dirty_idx_ondisk == ja->dirty_idx)

void bch2_journal_set_watermark(struct journal *j)
        struct bch_fs *c = container_of(j, struct bch_fs, journal);
        bool low_on_space = j->space[journal_space_clean].total * 4 <=
                j->space[journal_space_total].total;
        bool low_on_pin = fifo_free(&j->pin) < j->pin.size / 4;
        bool low_on_wb = bch2_btree_write_buffer_must_wait(c);
        unsigned watermark = low_on_space || low_on_pin || low_on_wb
                ? BCH_WATERMARK_reclaim
                : BCH_WATERMARK_stripe;
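
        /*
         * In other words (approximate reading): drop to the reclaim
         * watermark once less than a quarter of journal space is clean,
         * the pin FIFO is more than three quarters full, or the btree
         * write buffer needs flushing; otherwise stay at the normal
         * stripe watermark.
         */
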
        if (track_event_change(&c->times[BCH_TIME_blocked_journal_low_on_space], low_on_space) ||
            track_event_change(&c->times[BCH_TIME_blocked_journal_low_on_pin], low_on_pin) ||
            track_event_change(&c->times[BCH_TIME_blocked_write_buffer_full], low_on_wb))
                trace_and_count(c, journal_full, c);

        swap(watermark, j->watermark);
        if (watermark > j->watermark)

static struct journal_space
journal_dev_space_available(struct journal *j, struct bch_dev *ca,
                            enum journal_space_from from)
        struct journal_device *ja = &ca->journal;
        unsigned sectors, buckets, unwritten;

        if (from == journal_space_total)
                return (struct journal_space) {
                        .next_entry     = ca->mi.bucket_size,
                        .total          = ca->mi.bucket_size * ja->nr,

        buckets = bch2_journal_dev_buckets_available(j, ja, from);
        sectors = ja->sectors_free;
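
        /*
         * From here on, sectors is the contiguous space left for the next
         * entry and buckets the whole free buckets behind it; together they
         * become the next_entry/total figures returned below.
         */
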
         * Note that we don't allocate the space for a journal entry
         * until we write it out - thus, account for it here:
        for (seq = journal_last_unwritten_seq(j);
             seq <= journal_cur_seq(j);
                unwritten = j->buf[seq & JOURNAL_BUF_MASK].sectors;

                /* entry won't fit on this device, skip: */
                if (unwritten > ca->mi.bucket_size)

                if (unwritten >= sectors) {
                        sectors = ca->mi.bucket_size;

                sectors -= unwritten;

        if (sectors < ca->mi.bucket_size && buckets) {
                sectors = ca->mi.bucket_size;

        return (struct journal_space) {
                .next_entry     = sectors,
                .total          = sectors + buckets * ca->mi.bucket_size,

static struct journal_space __journal_space_available(struct journal *j, unsigned nr_devs_want,
                                                      enum journal_space_from from)
        struct bch_fs *c = container_of(j, struct bch_fs, journal);
        unsigned pos, nr_devs = 0;
        struct journal_space space, dev_space[BCH_SB_MEMBERS_MAX];

        BUG_ON(nr_devs_want > ARRAY_SIZE(dev_space));

        for_each_member_device_rcu(c, ca, &c->rw_devs[BCH_DATA_journal]) {
                space = journal_dev_space_available(j, ca, from);
                if (!space.next_entry)

                for (pos = 0; pos < nr_devs; pos++)
                        if (space.total > dev_space[pos].total)

                array_insert_item(dev_space, nr_devs, pos, space);

        if (nr_devs < nr_devs_want)
                return (struct journal_space) { 0, 0 };

         * We sorted largest to smallest, and we want the smallest out of the
         * @nr_devs_want largest devices:
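         * e.g. wanting two replicas from devices with 800, 500 and 200
         * sectors of space, the entry must fit on the two largest, so the
         * usable figure is 500 (illustrative numbers only).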
        return dev_space[nr_devs_want - 1];

void bch2_journal_space_available(struct journal *j)
        struct bch_fs *c = container_of(j, struct bch_fs, journal);
        unsigned clean, clean_ondisk, total;
        unsigned max_entry_size = min(j->buf[0].buf_size >> 9,
                                      j->buf[1].buf_size >> 9);
        unsigned nr_online = 0, nr_devs_want;
        bool can_discard = false;

        lockdep_assert_held(&j->lock);

        for_each_member_device_rcu(c, ca, &c->rw_devs[BCH_DATA_journal]) {
                struct journal_device *ja = &ca->journal;

                while (ja->dirty_idx != ja->cur_idx &&
                       ja->bucket_seq[ja->dirty_idx] < journal_last_seq(j))
                        ja->dirty_idx = (ja->dirty_idx + 1) % ja->nr;

                while (ja->dirty_idx_ondisk != ja->dirty_idx &&
                       ja->bucket_seq[ja->dirty_idx_ondisk] < j->last_seq_ondisk)
                        ja->dirty_idx_ondisk = (ja->dirty_idx_ondisk + 1) % ja->nr;

                if (ja->discard_idx != ja->dirty_idx_ondisk)

                max_entry_size = min_t(unsigned, max_entry_size, ca->mi.bucket_size);

        j->can_discard = can_discard;

        if (nr_online < metadata_replicas_required(c)) {
                ret = JOURNAL_ERR_insufficient_devices;

        nr_devs_want = min_t(unsigned, nr_online, c->opts.metadata_replicas);

        for (unsigned i = 0; i < journal_space_nr; i++)
                j->space[i] = __journal_space_available(j, nr_devs_want, i);

        clean_ondisk = j->space[journal_space_clean_ondisk].total;
        clean = j->space[journal_space_clean].total;
        total = j->space[journal_space_total].total;

        if (!j->space[journal_space_discarded].next_entry)
                ret = JOURNAL_ERR_journal_full;
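
        /*
         * Allow skipping flushes (non-flush journal writes) only while
         * things look comfortable - roughly: there is clean space on disk
         * beyond the next entry, little clean space is still waiting to hit
         * disk (at most 1/8 of the journal), and at least half of the clean
         * space is already clean on disk:
         */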
        if ((j->space[journal_space_clean_ondisk].next_entry <
             j->space[journal_space_clean_ondisk].total) &&
            (clean - clean_ondisk <= total / 8) &&
            (clean_ondisk * 2 > clean))
                set_bit(JOURNAL_MAY_SKIP_FLUSH, &j->flags);
                clear_bit(JOURNAL_MAY_SKIP_FLUSH, &j->flags);

        bch2_journal_set_watermark(j);

        j->cur_entry_sectors = !ret ? j->space[journal_space_discarded].next_entry : 0;
        j->cur_entry_error = ret;

/* Discards - last part of journal reclaim: */

static bool should_discard_bucket(struct journal *j, struct journal_device *ja)
        ret = ja->discard_idx != ja->dirty_idx_ondisk;
        spin_unlock(&j->lock);

 * Advance ja->discard_idx as long as it points to buckets that are no longer
 * dirty, issuing discards if necessary:
void bch2_journal_do_discards(struct journal *j)
        struct bch_fs *c = container_of(j, struct bch_fs, journal);

        mutex_lock(&j->discard_lock);

        for_each_rw_member(c, ca) {
                struct journal_device *ja = &ca->journal;

                while (should_discard_bucket(j, ja)) {
                        if (!c->opts.nochanges &&
                            bdev_max_discard_sectors(ca->disk_sb.bdev))
                                blkdev_issue_discard(ca->disk_sb.bdev,
                                        ja->buckets[ja->discard_idx]),
                                        ca->mi.bucket_size, GFP_NOFS);

                        ja->discard_idx = (ja->discard_idx + 1) % ja->nr;

                        bch2_journal_space_available(j);
                        spin_unlock(&j->lock);

        mutex_unlock(&j->discard_lock);

 * Journal entry pinning - machinery for holding a reference on a given journal
 * entry, holding it open to ensure it gets replayed during recovery:
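
/*
 * Sketch of a typical pin lifecycle, using the helpers defined below
 * (illustrative, not a complete user):
 *
 *      bch2_journal_pin_set(j, seq, &pin, flush_fn);   // hold seq open
 *      ...                                             // update reaches the btree
 *      bch2_journal_pin_drop(j, &pin);                 // seq can now be reclaimed
 */
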
void bch2_journal_reclaim_fast(struct journal *j)
        lockdep_assert_held(&j->lock);

         * Unpin journal entries whose reference counts reached zero, meaning
         * all btree nodes got written out
        while (!fifo_empty(&j->pin) &&
               j->pin.front <= j->seq_ondisk &&
               !atomic_read(&fifo_peek_front(&j->pin).count)) {

                bch2_journal_space_available(j);

bool __bch2_journal_pin_put(struct journal *j, u64 seq)
        struct journal_entry_pin_list *pin_list = journal_seq_pin(j, seq);

        return atomic_dec_and_test(&pin_list->count);

void bch2_journal_pin_put(struct journal *j, u64 seq)
        if (__bch2_journal_pin_put(j, seq)) {
                bch2_journal_reclaim_fast(j);
                spin_unlock(&j->lock);

static inline bool __journal_pin_drop(struct journal *j,
                                      struct journal_entry_pin *pin)
        struct journal_entry_pin_list *pin_list;

        if (!journal_pin_active(pin))

        if (j->flush_in_progress == pin)
                j->flush_in_progress_dropped = true;

        pin_list = journal_seq_pin(j, pin->seq);
        list_del_init(&pin->list);

         * Unpinning a journal entry may make journal_next_bucket() succeed, if
         * writing a new last_seq will now make another bucket available:
        return atomic_dec_and_test(&pin_list->count) &&
                pin_list == &fifo_peek_front(&j->pin);

void bch2_journal_pin_drop(struct journal *j,
                           struct journal_entry_pin *pin)
        if (__journal_pin_drop(j, pin))
                bch2_journal_reclaim_fast(j);
        spin_unlock(&j->lock);

static enum journal_pin_type journal_pin_type(journal_pin_flush_fn fn)
        if (fn == bch2_btree_node_flush0 ||
            fn == bch2_btree_node_flush1)
                return JOURNAL_PIN_btree;
        else if (fn == bch2_btree_key_cache_journal_flush)
                return JOURNAL_PIN_key_cache;
                return JOURNAL_PIN_other;

static inline void bch2_journal_pin_set_locked(struct journal *j, u64 seq,
                                               struct journal_entry_pin *pin,
                                               journal_pin_flush_fn flush_fn,
                                               enum journal_pin_type type)
        struct journal_entry_pin_list *pin_list = journal_seq_pin(j, seq);

         * flush_fn is how we identify journal pins in debugfs, so must always
         * exist, even if it doesn't do anything:
        atomic_inc(&pin_list->count);
        pin->flush = flush_fn;
        list_add(&pin->list, &pin_list->list[type]);

void bch2_journal_pin_copy(struct journal *j,
                           struct journal_entry_pin *dst,
                           struct journal_entry_pin *src,
                           journal_pin_flush_fn flush_fn)
        u64 seq = READ_ONCE(src->seq);

        if (seq < journal_last_seq(j)) {
                 * bch2_journal_pin_copy() raced with bch2_journal_pin_drop() on
                 * the src pin - with the pin dropped, the entry to pin might no
                 * longer exist, but that means there's no longer anything to
                 * copy and we can bail out here:
                spin_unlock(&j->lock);
        bool reclaim = __journal_pin_drop(j, dst);

        bch2_journal_pin_set_locked(j, seq, dst, flush_fn, journal_pin_type(flush_fn));

                bch2_journal_reclaim_fast(j);

         * If the journal is currently full, we might want to call flush_fn
        if (seq == journal_last_seq(j))

        spin_unlock(&j->lock);

void bch2_journal_pin_set(struct journal *j, u64 seq,
                          struct journal_entry_pin *pin,
                          journal_pin_flush_fn flush_fn)
        BUG_ON(seq < journal_last_seq(j));

        bool reclaim = __journal_pin_drop(j, pin);

        bch2_journal_pin_set_locked(j, seq, pin, flush_fn, journal_pin_type(flush_fn));

                bch2_journal_reclaim_fast(j);

         * If the journal is currently full, we might want to call flush_fn
        if (seq == journal_last_seq(j))

        spin_unlock(&j->lock);

 * bch2_journal_pin_flush: ensure journal pin callback is no longer running
void bch2_journal_pin_flush(struct journal *j, struct journal_entry_pin *pin)
        BUG_ON(journal_pin_active(pin));

        wait_event(j->pin_flush_wait, j->flush_in_progress != pin);
 * Journal reclaim: flush references to open journal entries to reclaim space
 * in the journal.
 * May be done by the journal code in the background as needed to free up space
 * for more journal entries, or as part of doing a clean shutdown, or to migrate
 * data off of a specific device:

static struct journal_entry_pin *
journal_get_next_pin(struct journal *j,
                     unsigned allowed_below_seq,
                     unsigned allowed_above_seq,
        struct journal_entry_pin_list *pin_list;
        struct journal_entry_pin *ret = NULL;
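
        /*
         * allowed_below_seq/allowed_above_seq are bitmasks of
         * journal_pin_type: which classes of pin may be returned for
         * entries at or below seq_to_flush, and which may be returned even
         * above it.
         */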
        fifo_for_each_entry_ptr(pin_list, &j->pin, *seq) {
                if (*seq > seq_to_flush && !allowed_above_seq)

                for (i = 0; i < JOURNAL_PIN_NR; i++)
                        if ((((1U << i) & allowed_below_seq) && *seq <= seq_to_flush) ||
                            ((1U << i) & allowed_above_seq)) {
                                ret = list_first_entry_or_null(&pin_list->list[i],
                                        struct journal_entry_pin, list);

/* returns true if we did work */
static size_t journal_flush_pins(struct journal *j,
                                 unsigned allowed_below_seq,
                                 unsigned allowed_above_seq,
                                 unsigned min_key_cache)
        struct journal_entry_pin *pin;
        size_t nr_flushed = 0;
        journal_pin_flush_fn flush_fn;

        lockdep_assert_held(&j->reclaim_lock);

                unsigned allowed_above = allowed_above_seq;
                unsigned allowed_below = allowed_below_seq;

                        allowed_above |= 1U << JOURNAL_PIN_key_cache;
                        allowed_below |= 1U << JOURNAL_PIN_key_cache;

                j->last_flushed = jiffies;

                pin = journal_get_next_pin(j, seq_to_flush, allowed_below, allowed_above, &seq);

                BUG_ON(j->flush_in_progress);
                j->flush_in_progress = pin;
                j->flush_in_progress_dropped = false;
                flush_fn = pin->flush;
                spin_unlock(&j->lock);

                if (min_key_cache && pin->flush == bch2_btree_key_cache_journal_flush)

                err = flush_fn(j, pin, seq);

                /* Pin might have been dropped or rearmed: */
                if (likely(!err && !j->flush_in_progress_dropped))
                        list_move(&pin->list, &journal_seq_pin(j, seq)->flushed);
                j->flush_in_progress = NULL;
                j->flush_in_progress_dropped = false;
                spin_unlock(&j->lock);

                wake_up(&j->pin_flush_wait);

static u64 journal_seq_to_flush(struct journal *j)
        struct bch_fs *c = container_of(j, struct bch_fs, journal);
        u64 seq_to_flush = 0;

        for_each_rw_member(c, ca) {
                struct journal_device *ja = &ca->journal;
                unsigned nr_buckets, bucket_to_flush;

                /* Try to keep the journal at most half full: */
                nr_buckets = ja->nr / 2;

                nr_buckets = min(nr_buckets, ja->nr);

                bucket_to_flush = (ja->cur_idx + nr_buckets) % ja->nr;
                seq_to_flush = max(seq_to_flush,
                                   ja->bucket_seq[bucket_to_flush]);
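
                /*
                 * e.g. ja->nr = 10, cur_idx = 7: bucket_to_flush =
                 * (7 + 5) % 10 = 2, i.e. flush everything up to the
                 * sequence number last written to bucket 2, so that at most
                 * half the buckets stay dirty (illustrative numbers only).
                 */
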
        /* Also flush if the pin fifo is more than half full */
        seq_to_flush = max_t(s64, seq_to_flush,
                             (s64) journal_cur_seq(j) -

        spin_unlock(&j->lock);

 * __bch2_journal_reclaim - free up journal buckets
 * @direct: direct or background reclaim?
 * @kicked: requested to run since we last ran?
 * Returns: 0 on success, or -EIO if the journal has been shutdown
 * Background journal reclaim writes out btree nodes. It should be run
 * early enough so that we never completely run out of journal buckets.
 * High watermarks for triggering background reclaim:
 * - FIFO has fewer than 512 entries left
 * - fewer than 25% journal buckets free
 * Background reclaim runs until low watermarks are reached:
 * - FIFO has more than 1024 entries left
 * - more than 50% journal buckets free
 * As long as a reclaim can complete in the time it takes to fill up
 * 512 journal entries or 25% of all journal buckets, then
 * journal_next_bucket() should not stall.
static int __bch2_journal_reclaim(struct journal *j, bool direct, bool kicked)
        struct bch_fs *c = container_of(j, struct bch_fs, journal);
        bool kthread = (current->flags & PF_KTHREAD) != 0;
        size_t min_nr, min_key_cache, nr_flushed;

         * We can't invoke memory reclaim while holding the reclaim_lock -
         * journal reclaim is required to make progress for memory reclaim
         * (cleaning the caches), so we can't get stuck in memory reclaim while
         * we're holding the reclaim lock:
        lockdep_assert_held(&j->reclaim_lock);
        flags = memalloc_noreclaim_save();

                if (kthread && kthread_should_stop())

                if (bch2_journal_error(j)) {

                bch2_journal_do_discards(j);

                seq_to_flush = journal_seq_to_flush(j);

                 * If it's been longer than j->reclaim_delay_ms since we last flushed,
                 * make sure to flush at least one journal pin:
                if (time_after(jiffies, j->last_flushed +
                               msecs_to_jiffies(c->opts.journal_reclaim_delay)))

                if (j->watermark != BCH_WATERMARK_stripe)

                if (atomic_read(&c->btree_cache.dirty) * 2 > c->btree_cache.used)

                min_key_cache = min(bch2_nr_btree_keys_need_flush(c), (size_t) 128);

                trace_and_count(c, journal_reclaim_start, c,
                                min_nr, min_key_cache,
                                atomic_read(&c->btree_cache.dirty),
                                atomic_long_read(&c->btree_key_cache.nr_dirty),
                                atomic_long_read(&c->btree_key_cache.nr_keys));

                nr_flushed = journal_flush_pins(j, seq_to_flush,
                                                min_nr, min_key_cache);

                        j->nr_direct_reclaim += nr_flushed;
                        j->nr_background_reclaim += nr_flushed;

                trace_and_count(c, journal_reclaim_finish, c, nr_flushed);

                        wake_up(&j->reclaim_wait);
        } while ((min_nr || min_key_cache) && nr_flushed && !direct);

        memalloc_noreclaim_restore(flags);

int bch2_journal_reclaim(struct journal *j)
        return __bch2_journal_reclaim(j, true, true);

static int bch2_journal_reclaim_thread(void *arg)
        struct journal *j = arg;
        struct bch_fs *c = container_of(j, struct bch_fs, journal);
        unsigned long delay, now;

        j->last_flushed = jiffies;

        while (!ret && !kthread_should_stop()) {
                bool kicked = j->reclaim_kicked;

                j->reclaim_kicked = false;

                mutex_lock(&j->reclaim_lock);
                ret = __bch2_journal_reclaim(j, false, kicked);
                mutex_unlock(&j->reclaim_lock);

                delay = msecs_to_jiffies(c->opts.journal_reclaim_delay);
                j->next_reclaim = j->last_flushed + delay;

                if (!time_in_range(j->next_reclaim, now, now + delay))
                        j->next_reclaim = now + delay;
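
                /*
                 * i.e. the thread wakes roughly journal_reclaim_delay ms
                 * after the last flush, with j->next_reclaim clamped into
                 * [now, now + delay] in case last_flushed is stale.
                 */
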
                        set_current_state(TASK_INTERRUPTIBLE|TASK_FREEZABLE);
                        if (kthread_should_stop())
                        if (j->reclaim_kicked)

                        journal_empty = fifo_empty(&j->pin);
                        spin_unlock(&j->lock);

                        else if (time_after(j->next_reclaim, jiffies))
                                schedule_timeout(j->next_reclaim - jiffies);

                __set_current_state(TASK_RUNNING);

void bch2_journal_reclaim_stop(struct journal *j)
        struct task_struct *p = j->reclaim_thread;

        j->reclaim_thread = NULL;

int bch2_journal_reclaim_start(struct journal *j)
        struct bch_fs *c = container_of(j, struct bch_fs, journal);
        struct task_struct *p;

        if (j->reclaim_thread)

        p = kthread_create(bch2_journal_reclaim_thread, j,
                           "bch-reclaim/%s", c->name);
        ret = PTR_ERR_OR_ZERO(p);
        bch_err_msg(c, ret, "creating journal reclaim thread");

        j->reclaim_thread = p;

static int journal_flush_done(struct journal *j, u64 seq_to_flush,
        ret = bch2_journal_error(j);

        mutex_lock(&j->reclaim_lock);
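
        /*
         * Flush key cache and other pins before btree pins: flushing the
         * key cache dirties btree nodes, so (presumably) doing it first
         * lets the second pass pick those up as well:
         */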
        if (journal_flush_pins(j, seq_to_flush,
                               (1U << JOURNAL_PIN_key_cache)|
                               (1U << JOURNAL_PIN_other), 0, 0, 0) ||
            journal_flush_pins(j, seq_to_flush,
                               (1U << JOURNAL_PIN_btree), 0, 0, 0))

        if (seq_to_flush > journal_cur_seq(j))
                bch2_journal_entry_close(j);

         * If journal replay hasn't completed, the unreplayed journal entries
         * hold refs on their corresponding sequence numbers
        ret = !test_bit(JOURNAL_REPLAY_DONE, &j->flags) ||
                journal_last_seq(j) > seq_to_flush ||

        spin_unlock(&j->lock);
        mutex_unlock(&j->reclaim_lock);

bool bch2_journal_flush_pins(struct journal *j, u64 seq_to_flush)
        /* time_stats this */
        bool did_work = false;

        if (!test_bit(JOURNAL_STARTED, &j->flags))

        closure_wait_event(&j->async_wait,
                           journal_flush_done(j, seq_to_flush, &did_work));
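
        /*
         * Flushing every open journal entry - e.g. for a clean shutdown -
         * is just this with seq_to_flush = U64_MAX; the journal header
         * wraps that as bch2_journal_flush_all_pins().
         */
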
int bch2_journal_flush_device_pins(struct journal *j, int dev_idx)
        struct bch_fs *c = container_of(j, struct bch_fs, journal);
        struct journal_entry_pin_list *p;

        fifo_for_each_entry_ptr(p, &j->pin, iter)
                        ? bch2_dev_list_has_dev(p->devs, dev_idx)
                        : p->devs.nr < c->opts.metadata_replicas)
        spin_unlock(&j->lock);

        bch2_journal_flush_pins(j, seq);

        ret = bch2_journal_error(j);

        mutex_lock(&c->replicas_gc_lock);
        bch2_replicas_gc_start(c, 1 << BCH_DATA_journal);

         * Now that we've populated replicas_gc, write to the journal to mark
         * active journal devices. This handles the case where the journal might
         * be empty. Otherwise we could clear all journal replicas and
         * temporarily put the fs into an unrecoverable state. Journal recovery
         * expects to find devices marked for journal data on unclean mount.
        ret = bch2_journal_meta(&c->journal);

                struct bch_replicas_padded replicas;

                seq = max(seq, journal_last_seq(j));
                if (seq >= j->pin.back)

                bch2_devlist_to_replicas(&replicas.e, BCH_DATA_journal,
                                         journal_seq_pin(j, seq)->devs);

                if (replicas.e.nr_devs) {
                        spin_unlock(&j->lock);
                        ret = bch2_mark_replicas(c, &replicas.e);

        spin_unlock(&j->lock);

        ret = bch2_replicas_gc_end(c, ret);
        mutex_unlock(&c->replicas_gc_lock);