bcachefs: Simplify hash table checks
[linux-block.git] / fs / bcachefs / journal.c
// SPDX-License-Identifier: GPL-2.0
/*
 * bcachefs journalling code, for btree insertions
 *
 * Copyright 2012 Google, Inc.
 */

#include "bcachefs.h"
#include "alloc_foreground.h"
#include "bkey_methods.h"
#include "btree_gc.h"
#include "btree_update.h"
#include "buckets.h"
#include "error.h"
#include "journal.h"
#include "journal_io.h"
#include "journal_reclaim.h"
#include "journal_seq_blacklist.h"
#include "super-io.h"
#include "trace.h"

static u64 last_unwritten_seq(struct journal *j)
{
	union journal_res_state s = READ_ONCE(j->reservations);

	lockdep_assert_held(&j->lock);

	return journal_cur_seq(j) - ((s.idx - s.unwritten_idx) & JOURNAL_BUF_MASK);
}

static inline bool journal_seq_unwritten(struct journal *j, u64 seq)
{
	return seq >= last_unwritten_seq(j);
}

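/*
 * Journal reservation state lives in a single 64-bit word (union
 * journal_res_state), so entries can be opened, closed and referenced with
 * lockless atomic64_cmpxchg() loops; cur_entry_offset doubles as a state
 * machine via the JOURNAL_ENTRY_CLOSED_VAL/JOURNAL_ENTRY_ERROR_VAL sentinels
 * used below.
 */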
static bool __journal_entry_is_open(union journal_res_state state)
{
	return state.cur_entry_offset < JOURNAL_ENTRY_CLOSED_VAL;
}

static bool journal_entry_is_open(struct journal *j)
{
	return __journal_entry_is_open(j->reservations);
}

static inline struct journal_buf *
journal_seq_to_buf(struct journal *j, u64 seq)
{
	struct journal_buf *buf = NULL;

	EBUG_ON(seq > journal_cur_seq(j));
	EBUG_ON(seq == journal_cur_seq(j) &&
		j->reservations.cur_entry_offset == JOURNAL_ENTRY_CLOSED_VAL);

	if (journal_seq_unwritten(j, seq)) {
		buf = j->buf + (seq & JOURNAL_BUF_MASK);
		EBUG_ON(le64_to_cpu(buf->data->seq) != seq);
	}
	return buf;
}

static void journal_pin_list_init(struct journal_entry_pin_list *p, int count)
{
	INIT_LIST_HEAD(&p->list);
	INIT_LIST_HEAD(&p->key_cache_list);
	INIT_LIST_HEAD(&p->flushed);
	atomic_set(&p->count, count);
	p->devs.nr = 0;
}

static void journal_pin_new_entry(struct journal *j)
{
	/*
	 * The fifo_push() needs to happen at the same time as j->seq is
	 * incremented for journal_last_seq() to be calculated correctly
	 */
	atomic64_inc(&j->seq);
	journal_pin_list_init(fifo_push_ref(&j->pin), 1);
}

static void bch2_journal_buf_init(struct journal *j)
{
	struct journal_buf *buf = journal_cur_buf(j);

	bkey_extent_init(&buf->key);
	buf->noflush = false;
	buf->must_flush = false;
	buf->separate_flush = false;

	memset(buf->has_inode, 0, sizeof(buf->has_inode));

	memset(buf->data, 0, sizeof(*buf->data));
	buf->data->seq = cpu_to_le64(journal_cur_seq(j));
	buf->data->u64s = 0;
}

void bch2_journal_halt(struct journal *j)
{
	union journal_res_state old, new;
	u64 v = atomic64_read(&j->reservations.counter);

	do {
		old.v = new.v = v;
		if (old.cur_entry_offset == JOURNAL_ENTRY_ERROR_VAL)
			return;

		new.cur_entry_offset = JOURNAL_ENTRY_ERROR_VAL;
	} while ((v = atomic64_cmpxchg(&j->reservations.counter,
				       old.v, new.v)) != old.v);

	j->err_seq = journal_cur_seq(j);
	journal_wake(j);
	closure_wake_up(&journal_cur_buf(j)->wait);
}

/* journal entry close/open: */

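/*
 * Kicks off the write of a journal buffer via the journal's io closure;
 * called once all references to that (closed) buffer have been dropped.
 */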
void __bch2_journal_buf_put(struct journal *j)
{
	closure_call(&j->io, bch2_journal_write, system_highpri_wq, NULL);
}

/*
 * Returns true if journal entry is now closed:
 *
 * We don't close a journal_buf until the next journal_buf is finished writing,
 * and can be opened again - this also initializes the next journal_buf:
 */
static bool __journal_entry_close(struct journal *j)
{
	struct bch_fs *c = container_of(j, struct bch_fs, journal);
	struct journal_buf *buf = journal_cur_buf(j);
	union journal_res_state old, new;
	u64 v = atomic64_read(&j->reservations.counter);
	unsigned sectors;

	lockdep_assert_held(&j->lock);

	do {
		old.v = new.v = v;
		if (old.cur_entry_offset == JOURNAL_ENTRY_CLOSED_VAL)
			return true;

		if (old.cur_entry_offset == JOURNAL_ENTRY_ERROR_VAL) {
			/* this entry will never be written: */
			closure_wake_up(&buf->wait);
			return true;
		}

		if (!test_bit(JOURNAL_NEED_WRITE, &j->flags)) {
			set_bit(JOURNAL_NEED_WRITE, &j->flags);
			j->need_write_time = local_clock();
		}

		new.cur_entry_offset = JOURNAL_ENTRY_CLOSED_VAL;
		new.idx++;

		if (new.idx == new.unwritten_idx)
			return false;

		BUG_ON(journal_state_count(new, new.idx));
	} while ((v = atomic64_cmpxchg(&j->reservations.counter,
				       old.v, new.v)) != old.v);

	/* Close out old buffer: */
	buf->data->u64s = cpu_to_le32(old.cur_entry_offset);

	sectors = vstruct_blocks_plus(buf->data, c->block_bits,
				      buf->u64s_reserved) << c->block_bits;
	BUG_ON(sectors > buf->sectors);
	buf->sectors = sectors;

	/*
	 * We have to set last_seq here, _before_ opening a new journal entry:
	 *
	 * A thread may replace an old pin with a new pin on its current
	 * journal reservation - the expectation being that the journal will
	 * contain either what the old pin protected or what the new pin
	 * protects.
	 *
	 * After the old pin is dropped journal_last_seq() won't include the old
	 * pin, so we can only write the updated last_seq on the entry that
	 * contains whatever the new pin protects.
	 *
	 * Restated, we can _not_ update last_seq for a given entry if there
	 * could be a newer entry open with reservations/pins that have been
	 * taken against it.
	 *
	 * Hence, we want to update/set last_seq on the current journal entry
	 * right before we open a new one:
	 */
	buf->data->last_seq = cpu_to_le64(journal_last_seq(j));

	__bch2_journal_pin_put(j, le64_to_cpu(buf->data->seq));

	/* Initialize new buffer: */
	journal_pin_new_entry(j);

	bch2_journal_buf_init(j);

	cancel_delayed_work(&j->write_work);
	clear_bit(JOURNAL_NEED_WRITE, &j->flags);

	bch2_journal_space_available(j);

	bch2_journal_buf_put(j, old.idx);
	return true;
}

static bool journal_entry_want_write(struct journal *j)
{
	union journal_res_state s = READ_ONCE(j->reservations);
	bool ret = false;

	/*
	 * Don't close it yet if we already have a write in flight, but do set
	 * NEED_WRITE:
	 */
	if (s.idx != s.unwritten_idx)
		set_bit(JOURNAL_NEED_WRITE, &j->flags);
	else
		ret = __journal_entry_close(j);

	return ret;
}

static bool journal_entry_close(struct journal *j)
{
	bool ret;

	spin_lock(&j->lock);
	ret = journal_entry_want_write(j);
	spin_unlock(&j->lock);

	return ret;
}

/*
 * should _only_ be called from journal_res_get() - when we actually want a
 * journal reservation - journal entry is open means journal is dirty:
 *
 * returns:
 * 0: success
 * -ENOSPC: journal currently full, must invoke reclaim
 * -EAGAIN: journal blocked, must wait
 * -EROFS: insufficient rw devices or journal error
 */
static int journal_entry_open(struct journal *j)
{
	struct bch_fs *c = container_of(j, struct bch_fs, journal);
	struct journal_buf *buf = journal_cur_buf(j);
	union journal_res_state old, new;
	int u64s;
	u64 v;

	BUG_ON(BCH_SB_CLEAN(c->disk_sb.sb));

	lockdep_assert_held(&j->lock);
	BUG_ON(journal_entry_is_open(j));

	if (j->blocked)
		return cur_entry_blocked;

	if (j->cur_entry_error)
		return j->cur_entry_error;

	BUG_ON(!j->cur_entry_sectors);

	buf->u64s_reserved = j->entry_u64s_reserved;
	buf->disk_sectors = j->cur_entry_sectors;
	buf->sectors = min(buf->disk_sectors, buf->buf_size >> 9);

	u64s = (int) (buf->sectors << 9) / sizeof(u64) -
		journal_entry_overhead(j);
	u64s = clamp_t(int, u64s, 0, JOURNAL_ENTRY_CLOSED_VAL - 1);

	if (u64s <= le32_to_cpu(buf->data->u64s))
		return cur_entry_journal_full;

	/*
	 * Must be set before marking the journal entry as open:
	 */
	j->cur_entry_u64s = u64s;

	v = atomic64_read(&j->reservations.counter);
	do {
		old.v = new.v = v;

		if (old.cur_entry_offset == JOURNAL_ENTRY_ERROR_VAL)
			return cur_entry_insufficient_devices;

		/* Handle any already added entries */
		new.cur_entry_offset = le32_to_cpu(buf->data->u64s);

		EBUG_ON(journal_state_count(new, new.idx));
		journal_state_inc(&new);
	} while ((v = atomic64_cmpxchg(&j->reservations.counter,
				       old.v, new.v)) != old.v);

	if (j->res_get_blocked_start)
		bch2_time_stats_update(j->blocked_time,
				       j->res_get_blocked_start);
	j->res_get_blocked_start = 0;

	mod_delayed_work(system_freezable_wq,
			 &j->write_work,
			 msecs_to_jiffies(j->write_delay_ms));
	journal_wake(j);
	return 0;
}

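/*
 * A quiesced journal has no entry open and no writes in flight; this is what
 * bch2_journal_block() and the shutdown path wait for before proceeding.
 */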
static bool journal_quiesced(struct journal *j)
{
	union journal_res_state s = READ_ONCE(j->reservations);
	bool ret = s.idx == s.unwritten_idx && !__journal_entry_is_open(s);

	if (!ret)
		journal_entry_close(j);
	return ret;
}

static void journal_quiesce(struct journal *j)
{
	wait_event(j->wait, journal_quiesced(j));
}

static void journal_write_work(struct work_struct *work)
{
	struct journal *j = container_of(work, struct journal, write_work.work);

	journal_entry_close(j);
}

/*
 * Given an inode number, if that inode number has data in the journal that
 * hasn't yet been flushed, return the journal sequence number that needs to be
 * flushed:
 */
u64 bch2_inode_journal_seq(struct journal *j, u64 inode)
{
	size_t h = hash_64(inode, ilog2(sizeof(j->buf[0].has_inode) * 8));
	union journal_res_state s;
	unsigned i;
	u64 seq;

	spin_lock(&j->lock);
	seq = journal_cur_seq(j);
	s = READ_ONCE(j->reservations);
	i = s.idx;

	while (1) {
		if (test_bit(h, j->buf[i].has_inode))
			goto out;

		if (i == s.unwritten_idx)
			break;

		i = (i - 1) & JOURNAL_BUF_MASK;
		seq--;
	}

	seq = 0;
out:
	spin_unlock(&j->lock);

	return seq;
}

void bch2_journal_set_has_inum(struct journal *j, u64 inode, u64 seq)
{
	size_t h = hash_64(inode, ilog2(sizeof(j->buf[0].has_inode) * 8));
	struct journal_buf *buf;

	spin_lock(&j->lock);

	if ((buf = journal_seq_to_buf(j, seq)))
		set_bit(h, buf->has_inode);

	spin_unlock(&j->lock);
}

static int __journal_res_get(struct journal *j, struct journal_res *res,
			     unsigned flags)
{
	struct bch_fs *c = container_of(j, struct bch_fs, journal);
	struct journal_buf *buf;
	bool can_discard;
	int ret;
retry:
	if (journal_res_get_fast(j, res, flags))
		return 0;

	if (bch2_journal_error(j))
		return -EROFS;

	spin_lock(&j->lock);

	/*
	 * Recheck after taking the lock, so we don't race with another thread
	 * that just did journal_entry_open() and call journal_entry_close()
	 * unnecessarily
	 */
	if (journal_res_get_fast(j, res, flags)) {
		spin_unlock(&j->lock);
		return 0;
	}

	if (!(flags & JOURNAL_RES_GET_RESERVED) &&
	    !test_bit(JOURNAL_MAY_GET_UNRESERVED, &j->flags)) {
		/*
		 * Don't want to close current journal entry, just need to
		 * invoke reclaim:
		 */
		ret = cur_entry_journal_full;
		goto unlock;
	}

	/*
	 * If we couldn't get a reservation because the current buf filled up,
	 * and we had room for a bigger entry on disk, signal that we want to
	 * realloc the journal bufs:
	 */
	buf = journal_cur_buf(j);
	if (journal_entry_is_open(j) &&
	    buf->buf_size >> 9 < buf->disk_sectors &&
	    buf->buf_size < JOURNAL_ENTRY_SIZE_MAX)
		j->buf_size_want = max(j->buf_size_want, buf->buf_size << 1);

	if (journal_entry_is_open(j) &&
	    !__journal_entry_close(j)) {
		/*
		 * We failed to get a reservation on the current open journal
		 * entry because it's full, and we can't close it because
		 * there's still a previous one in flight:
		 */
		trace_journal_entry_full(c);
		ret = cur_entry_blocked;
	} else {
		ret = journal_entry_open(j);
	}
unlock:
	if ((ret && ret != cur_entry_insufficient_devices) &&
	    !j->res_get_blocked_start) {
		j->res_get_blocked_start = local_clock() ?: 1;
		trace_journal_full(c);
	}

	can_discard = j->can_discard;
	spin_unlock(&j->lock);

	if (!ret)
		goto retry;

	if ((ret == cur_entry_journal_full ||
	     ret == cur_entry_journal_pin_full) &&
	    !can_discard &&
	    j->reservations.idx == j->reservations.unwritten_idx &&
	    (flags & JOURNAL_RES_GET_RESERVED)) {
		char *journal_debug_buf = kmalloc(4096, GFP_ATOMIC);

		bch_err(c, "Journal stuck!");
		if (journal_debug_buf) {
			bch2_journal_debug_to_text(&_PBUF(journal_debug_buf, 4096), j);
			bch_err(c, "%s", journal_debug_buf);

			bch2_journal_pins_to_text(&_PBUF(journal_debug_buf, 4096), j);
			bch_err(c, "Journal pins:\n%s", journal_debug_buf);
			kfree(journal_debug_buf);
		}

		bch2_fatal_error(c);
		dump_stack();
	}

	/*
	 * Journal is full - can't rely on reclaim from work item due to
	 * freezing:
	 */
	if ((ret == cur_entry_journal_full ||
	     ret == cur_entry_journal_pin_full) &&
	    !(flags & JOURNAL_RES_GET_NONBLOCK)) {
		if (can_discard) {
			bch2_journal_do_discards(j);
			goto retry;
		}

		if (mutex_trylock(&j->reclaim_lock)) {
			bch2_journal_reclaim(j);
			mutex_unlock(&j->reclaim_lock);
		}
	}

	return ret == cur_entry_insufficient_devices ? -EROFS : -EAGAIN;
}

/*
 * Essentially the entry function to the journaling code. When bcachefs is doing
 * a btree insert, it calls this function to get the current journal write.
 * Journal write is the structure used to set up journal writes. The calling
 * function will then add its keys to the structure, queuing them for the next
 * write.
 *
 * To ensure forward progress, the current task must not be holding any
 * btree node write locks.
 */
int bch2_journal_res_get_slowpath(struct journal *j, struct journal_res *res,
				  unsigned flags)
{
	int ret;

	closure_wait_event(&j->async_wait,
		   (ret = __journal_res_get(j, res, flags)) != -EAGAIN ||
		   (flags & JOURNAL_RES_GET_NONBLOCK));
	return ret;
}

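/*
 * Illustrative sketch of the reservation lifecycle (hypothetical caller, not
 * part of this file): get a reservation, copy entries into the open buffer,
 * drop the reservation, then optionally wait for the entry to reach disk:
 *
 *	struct journal_res res = { 0 };
 *	int ret = bch2_journal_res_get(j, &res, jset_u64s(0), 0);
 *	if (ret)
 *		return ret;
 *	... copy keys into the buffer at res.offset ...
 *	bch2_journal_res_put(j, &res);
 *	return bch2_journal_flush_seq(j, res.seq);
 *
 * bch2_journal_meta() below is this same pattern with an empty payload.
 */
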
/* journal_preres: */

static bool journal_preres_available(struct journal *j,
				     struct journal_preres *res,
				     unsigned new_u64s,
				     unsigned flags)
{
	bool ret = bch2_journal_preres_get_fast(j, res, new_u64s, flags, true);

	if (!ret && mutex_trylock(&j->reclaim_lock)) {
		bch2_journal_reclaim(j);
		mutex_unlock(&j->reclaim_lock);
	}

	return ret;
}

int __bch2_journal_preres_get(struct journal *j,
			      struct journal_preres *res,
			      unsigned new_u64s,
			      unsigned flags)
{
	int ret;

	closure_wait_event(&j->preres_wait,
		   (ret = bch2_journal_error(j)) ||
		   journal_preres_available(j, res, new_u64s, flags));
	return ret;
}

/* journal_entry_res: */

void bch2_journal_entry_res_resize(struct journal *j,
				   struct journal_entry_res *res,
				   unsigned new_u64s)
{
	union journal_res_state state;
	int d = new_u64s - res->u64s;

	spin_lock(&j->lock);

	j->entry_u64s_reserved += d;
	if (d <= 0)
		goto out;

	j->cur_entry_u64s = max_t(int, 0, j->cur_entry_u64s - d);
	smp_mb();
	state = READ_ONCE(j->reservations);

	if (state.cur_entry_offset < JOURNAL_ENTRY_CLOSED_VAL &&
	    state.cur_entry_offset > j->cur_entry_u64s) {
		j->cur_entry_u64s += d;
		/*
		 * Not enough room in current journal entry, have to flush it:
		 */
		__journal_entry_close(j);
	} else {
		journal_cur_buf(j)->u64s_reserved += d;
	}
out:
	spin_unlock(&j->lock);
	res->u64s += d;
}

/* journal flushing: */

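/*
 * Not every journal write is a flush write: an entry can go out with
 * buf->noflush set, in which case it is not yet durable. A caller that needs
 * durability marks the buffer must_flush; if the requested sequence number
 * was already written without a flush, bch2_journal_flush_seq_async() below
 * bumps to a newer sequence number instead.
 */
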
/**
 * bch2_journal_flush_seq_async - wait for a journal entry to be written
 *
 * like bch2_journal_wait_on_seq, except that it triggers a write immediately if
 * necessary
 */
int bch2_journal_flush_seq_async(struct journal *j, u64 seq,
				 struct closure *parent)
{
	struct journal_buf *buf;
	int ret = 0;

	if (seq <= j->flushed_seq_ondisk)
		return 1;

	spin_lock(&j->lock);

	BUG_ON(seq > journal_cur_seq(j));

	/* Recheck under lock: */
	if (j->err_seq && seq >= j->err_seq) {
		ret = -EIO;
		goto out;
	}

	if (seq <= j->flushed_seq_ondisk) {
		ret = 1;
		goto out;
	}

	/* if seq was written, but not flushed - flush a newer one instead */
	seq = max(seq, last_unwritten_seq(j));

recheck_need_open:
	if (seq == journal_cur_seq(j) && !journal_entry_is_open(j)) {
		struct journal_res res = { 0 };

		spin_unlock(&j->lock);

		ret = bch2_journal_res_get(j, &res, jset_u64s(0), 0);
		if (ret)
			return ret;

		seq = res.seq;
		buf = j->buf + (seq & JOURNAL_BUF_MASK);
		buf->must_flush = true;
		set_bit(JOURNAL_NEED_WRITE, &j->flags);

		if (parent && !closure_wait(&buf->wait, parent))
			BUG();

		bch2_journal_res_put(j, &res);

		spin_lock(&j->lock);
		goto want_write;
	}

	/*
	 * if write was kicked off without a flush, flush the next sequence
	 * number instead
	 */
	buf = journal_seq_to_buf(j, seq);
	if (buf->noflush) {
		seq++;
		goto recheck_need_open;
	}

	buf->must_flush = true;

	if (parent && !closure_wait(&buf->wait, parent))
		BUG();
want_write:
	if (seq == journal_cur_seq(j))
		journal_entry_want_write(j);
out:
	spin_unlock(&j->lock);
	return ret;
}

int bch2_journal_flush_seq(struct journal *j, u64 seq)
{
	u64 start_time = local_clock();
	int ret, ret2;

	ret = wait_event_interruptible(j->wait, (ret2 = bch2_journal_flush_seq_async(j, seq, NULL)));

	if (!ret)
		bch2_time_stats_update(j->flush_seq_time, start_time);

	return ret ?: ret2 < 0 ? ret2 : 0;
}

int bch2_journal_meta(struct journal *j)
{
	struct journal_res res;
	int ret;

	memset(&res, 0, sizeof(res));

	ret = bch2_journal_res_get(j, &res, jset_u64s(0), 0);
	if (ret)
		return ret;

	bch2_journal_res_put(j, &res);

	return bch2_journal_flush_seq(j, res.seq);
}

/*
 * bch2_journal_flush_async - if there is an open journal entry, or a journal
 * still being written, write it and wait for the write to complete
 */
void bch2_journal_flush_async(struct journal *j, struct closure *parent)
{
	u64 seq, journal_seq;

	spin_lock(&j->lock);
	journal_seq = journal_cur_seq(j);

	if (journal_entry_is_open(j)) {
		seq = journal_seq;
	} else if (journal_seq) {
		seq = journal_seq - 1;
	} else {
		spin_unlock(&j->lock);
		return;
	}
	spin_unlock(&j->lock);

	bch2_journal_flush_seq_async(j, seq, parent);
}

int bch2_journal_flush(struct journal *j)
{
	u64 seq, journal_seq;

	spin_lock(&j->lock);
	journal_seq = journal_cur_seq(j);

	if (journal_entry_is_open(j)) {
		seq = journal_seq;
	} else if (journal_seq) {
		seq = journal_seq - 1;
	} else {
		spin_unlock(&j->lock);
		return 0;
	}
	spin_unlock(&j->lock);

	return bch2_journal_flush_seq(j, seq);
}

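/*
 * While j->blocked is nonzero, journal_entry_open() refuses to open new
 * entries (it returns cur_entry_blocked), so bch2_journal_block() both drains
 * in-flight writes via journal_quiesce() and prevents new ones until the
 * matching bch2_journal_unblock().
 */
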
/* block/unblock the journal: */

void bch2_journal_unblock(struct journal *j)
{
	spin_lock(&j->lock);
	j->blocked--;
	spin_unlock(&j->lock);

	journal_wake(j);
}

void bch2_journal_block(struct journal *j)
{
	spin_lock(&j->lock);
	j->blocked++;
	spin_unlock(&j->lock);

	journal_quiesce(j);
}

/* allocate journal on a device: */

static int __bch2_set_nr_journal_buckets(struct bch_dev *ca, unsigned nr,
					 bool new_fs, struct closure *cl)
{
	struct bch_fs *c = ca->fs;
	struct journal_device *ja = &ca->journal;
	struct bch_sb_field_journal *journal_buckets;
	u64 *new_bucket_seq = NULL, *new_buckets = NULL;
	int ret = 0;

	/* don't handle reducing nr of buckets yet: */
	if (nr <= ja->nr)
		return 0;

	new_buckets = kzalloc(nr * sizeof(u64), GFP_KERNEL);
	new_bucket_seq = kzalloc(nr * sizeof(u64), GFP_KERNEL);
	if (!new_buckets || !new_bucket_seq) {
		ret = -ENOMEM;
		goto err;
	}

	journal_buckets = bch2_sb_resize_journal(&ca->disk_sb,
				nr + sizeof(*journal_buckets) / sizeof(u64));
	if (!journal_buckets) {
		ret = -ENOSPC;
		goto err;
	}

	/*
	 * We may be called from the device add path, before the new device has
	 * actually been added to the running filesystem:
	 */
	if (c)
		spin_lock(&c->journal.lock);

	memcpy(new_buckets, ja->buckets, ja->nr * sizeof(u64));
	memcpy(new_bucket_seq, ja->bucket_seq, ja->nr * sizeof(u64));
	swap(new_buckets, ja->buckets);
	swap(new_bucket_seq, ja->bucket_seq);

	if (c)
		spin_unlock(&c->journal.lock);

	while (ja->nr < nr) {
		struct open_bucket *ob = NULL;
		unsigned pos;
		long bucket;

		if (new_fs) {
			bucket = bch2_bucket_alloc_new_fs(ca);
			if (bucket < 0) {
				ret = -ENOSPC;
				goto err;
			}
		} else {
			rcu_read_lock();
			ob = bch2_bucket_alloc(c, ca, RESERVE_NONE,
					       false, cl);
			rcu_read_unlock();
			if (IS_ERR(ob)) {
				ret = cl ? -EAGAIN : -ENOSPC;
				goto err;
			}

			bucket = sector_to_bucket(ca, ob->ptr.offset);
		}

		if (c) {
			percpu_down_read(&c->mark_lock);
			spin_lock(&c->journal.lock);
		}

		/*
		 * XXX
		 * For resize at runtime, we should be writing the new
		 * superblock before inserting into the journal array
		 */

		pos = ja->nr ? (ja->cur_idx + 1) % ja->nr : 0;
		__array_insert_item(ja->buckets, ja->nr, pos);
		__array_insert_item(ja->bucket_seq, ja->nr, pos);
		__array_insert_item(journal_buckets->buckets, ja->nr, pos);
		ja->nr++;

		ja->buckets[pos] = bucket;
		ja->bucket_seq[pos] = 0;
		journal_buckets->buckets[pos] = cpu_to_le64(bucket);

		if (pos <= ja->discard_idx)
			ja->discard_idx = (ja->discard_idx + 1) % ja->nr;
		if (pos <= ja->dirty_idx_ondisk)
			ja->dirty_idx_ondisk = (ja->dirty_idx_ondisk + 1) % ja->nr;
		if (pos <= ja->dirty_idx)
			ja->dirty_idx = (ja->dirty_idx + 1) % ja->nr;
		if (pos <= ja->cur_idx)
			ja->cur_idx = (ja->cur_idx + 1) % ja->nr;

		if (!c || new_fs)
			bch2_mark_metadata_bucket(c, ca, bucket, BCH_DATA_journal,
						  ca->mi.bucket_size,
						  gc_phase(GC_PHASE_SB),
						  0);

		if (c) {
			spin_unlock(&c->journal.lock);
			percpu_up_read(&c->mark_lock);
		}

		if (c && !new_fs)
			ret = bch2_trans_do(c, NULL, NULL, BTREE_INSERT_NOFAIL,
				bch2_trans_mark_metadata_bucket(&trans, NULL, ca,
						bucket, BCH_DATA_journal,
						ca->mi.bucket_size));

		if (!new_fs)
			bch2_open_bucket_put(c, ob);

		if (ret)
			goto err;
	}
err:
	bch2_sb_resize_journal(&ca->disk_sb,
		ja->nr + sizeof(*journal_buckets) / sizeof(u64));
	kfree(new_bucket_seq);
	kfree(new_buckets);

	return ret;
}

/*
 * Allocate more journal space at runtime - not currently making use of it, but
 * the code works:
 */
int bch2_set_nr_journal_buckets(struct bch_fs *c, struct bch_dev *ca,
				unsigned nr)
{
	struct journal_device *ja = &ca->journal;
	struct closure cl;
	unsigned current_nr;
	int ret;

	closure_init_stack(&cl);

	do {
		struct disk_reservation disk_res = { 0, 0 };

		closure_sync(&cl);

		mutex_lock(&c->sb_lock);
		current_nr = ja->nr;

		/*
		 * note: journal buckets aren't really counted as _sectors_ used yet, so
		 * we don't need the disk reservation to avoid the BUG_ON() in buckets.c
		 * when space used goes up without a reservation - but we do need the
		 * reservation to ensure we'll actually be able to allocate:
		 */

		if (bch2_disk_reservation_get(c, &disk_res,
				bucket_to_sector(ca, nr - ja->nr), 1, 0)) {
			mutex_unlock(&c->sb_lock);
			return -ENOSPC;
		}

		ret = __bch2_set_nr_journal_buckets(ca, nr, false, &cl);

		bch2_disk_reservation_put(c, &disk_res);

		if (ja->nr != current_nr)
			bch2_write_super(c);
		mutex_unlock(&c->sb_lock);
	} while (ret == -EAGAIN);

	return ret;
}

int bch2_dev_journal_alloc(struct bch_dev *ca)
{
	unsigned nr;

	if (dynamic_fault("bcachefs:add:journal_alloc"))
		return -ENOMEM;

	/* 1/128th of the device by default: */
	nr = ca->mi.nbuckets >> 7;

	/*
	 * clamp journal size to 8192 buckets or 8GB (in sectors), whichever
	 * is smaller:
	 */
	nr = clamp_t(unsigned, nr,
		     BCH_JOURNAL_BUCKETS_MIN,
		     min(1 << 13,
			 (1 << 24) / ca->mi.bucket_size));

	return __bch2_set_nr_journal_buckets(ca, nr, true, NULL);
}
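
/*
 * Worked example of the sizing above (illustrative numbers, not from the
 * source): a 1 TiB device with 512 KiB buckets has 2097152 buckets, so nr
 * starts at 16384; the clamp ceiling is min(8192, (1 << 24) / 1024) = 8192,
 * giving an 8192-bucket (4 GiB) journal.
 */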

/* startup/shutdown: */

static bool bch2_journal_writing_to_device(struct journal *j, unsigned dev_idx)
{
	union journal_res_state state;
	bool ret = false;
	unsigned i;

	spin_lock(&j->lock);
	state = READ_ONCE(j->reservations);
	i = state.idx;

	while (i != state.unwritten_idx) {
		i = (i - 1) & JOURNAL_BUF_MASK;
		if (bch2_bkey_has_device(bkey_i_to_s_c(&j->buf[i].key), dev_idx))
			ret = true;
	}
	spin_unlock(&j->lock);

	return ret;
}

void bch2_dev_journal_stop(struct journal *j, struct bch_dev *ca)
{
	wait_event(j->wait, !bch2_journal_writing_to_device(j, ca->dev_idx));
}

void bch2_fs_journal_stop(struct journal *j)
{
	bch2_journal_flush_all_pins(j);

	wait_event(j->wait, journal_entry_close(j));

	/*
	 * Always write a new journal entry, to make sure the clock hands are up
	 * to date (and match the superblock)
	 */
	bch2_journal_meta(j);

	journal_quiesce(j);

	BUG_ON(!bch2_journal_error(j) &&
	       test_bit(JOURNAL_REPLAY_DONE, &j->flags) &&
	       (journal_entry_is_open(j) ||
		j->last_empty_seq + 1 != journal_cur_seq(j)));

	cancel_delayed_work_sync(&j->write_work);
	bch2_journal_reclaim_stop(j);
}

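/*
 * Journal startup after recovery: cur_seq is the sequence number the first
 * new journal entry will be written with, and last_seq (taken from the newest
 * entry found during recovery) is, roughly, the oldest entry that still needs
 * to be replayed/flushed - the pin FIFO below is sized to cover that range.
 */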
int bch2_fs_journal_start(struct journal *j, u64 cur_seq,
			  struct list_head *journal_entries)
{
	struct bch_fs *c = container_of(j, struct bch_fs, journal);
	struct journal_entry_pin_list *p;
	struct journal_replay *i;
	u64 last_seq = cur_seq, nr, seq;

	if (!list_empty(journal_entries))
		last_seq = le64_to_cpu(list_last_entry(journal_entries,
				struct journal_replay, list)->j.last_seq);

	nr = cur_seq - last_seq;

	if (nr + 1 > j->pin.size) {
		free_fifo(&j->pin);
		init_fifo(&j->pin, roundup_pow_of_two(nr + 1), GFP_KERNEL);
		if (!j->pin.data) {
			bch_err(c, "error reallocating journal fifo (%llu open entries)", nr);
			return -ENOMEM;
		}
	}

	j->replay_journal_seq = last_seq;
	j->replay_journal_seq_end = cur_seq;
	j->last_seq_ondisk = last_seq;
	j->pin.front = last_seq;
	j->pin.back = cur_seq;
	atomic64_set(&j->seq, cur_seq - 1);

	fifo_for_each_entry_ptr(p, &j->pin, seq)
		journal_pin_list_init(p, 1);

	list_for_each_entry(i, journal_entries, list) {
		unsigned ptr;

		seq = le64_to_cpu(i->j.seq);
		BUG_ON(seq >= cur_seq);

		if (seq < last_seq)
			continue;

		p = journal_seq_pin(j, seq);

		p->devs.nr = 0;
		for (ptr = 0; ptr < i->nr_ptrs; ptr++)
			bch2_dev_list_add_dev(&p->devs, i->ptrs[ptr].dev);
	}

	spin_lock(&j->lock);

	set_bit(JOURNAL_STARTED, &j->flags);
	j->last_flush_write = jiffies;

	journal_pin_new_entry(j);

	j->reservations.idx = j->reservations.unwritten_idx = journal_cur_seq(j);

	bch2_journal_buf_init(j);

	c->last_bucket_seq_cleanup = journal_cur_seq(j);

	bch2_journal_space_available(j);
	spin_unlock(&j->lock);

	return bch2_journal_reclaim_start(j);
}

/* init/exit: */

void bch2_dev_journal_exit(struct bch_dev *ca)
{
	kfree(ca->journal.bio);
	kfree(ca->journal.buckets);
	kfree(ca->journal.bucket_seq);

	ca->journal.bio = NULL;
	ca->journal.buckets = NULL;
	ca->journal.bucket_seq = NULL;
}

int bch2_dev_journal_init(struct bch_dev *ca, struct bch_sb *sb)
{
	struct journal_device *ja = &ca->journal;
	struct bch_sb_field_journal *journal_buckets =
		bch2_sb_get_journal(sb);
	unsigned i, nr_bvecs;

	ja->nr = bch2_nr_journal_buckets(journal_buckets);

	ja->bucket_seq = kcalloc(ja->nr, sizeof(u64), GFP_KERNEL);
	if (!ja->bucket_seq)
		return -ENOMEM;

	nr_bvecs = DIV_ROUND_UP(JOURNAL_ENTRY_SIZE_MAX, PAGE_SIZE);

	ca->journal.bio = bio_kmalloc(nr_bvecs, GFP_KERNEL);
	if (!ca->journal.bio)
		return -ENOMEM;

	bio_init(ca->journal.bio, NULL, ca->journal.bio->bi_inline_vecs, nr_bvecs, 0);

	ja->buckets = kcalloc(ja->nr, sizeof(u64), GFP_KERNEL);
	if (!ja->buckets)
		return -ENOMEM;

	for (i = 0; i < ja->nr; i++)
		ja->buckets[i] = le64_to_cpu(journal_buckets->buckets[i]);

	return 0;
}

void bch2_fs_journal_exit(struct journal *j)
{
	unsigned i;

	for (i = 0; i < ARRAY_SIZE(j->buf); i++)
		kvpfree(j->buf[i].data, j->buf[i].buf_size);
	free_fifo(&j->pin);
}

int bch2_fs_journal_init(struct journal *j)
{
	struct bch_fs *c = container_of(j, struct bch_fs, journal);
	static struct lock_class_key res_key;
	unsigned i;
	int ret = 0;

	pr_verbose_init(c->opts, "");

	spin_lock_init(&j->lock);
	spin_lock_init(&j->err_lock);
	init_waitqueue_head(&j->wait);
	INIT_DELAYED_WORK(&j->write_work, journal_write_work);
	init_waitqueue_head(&j->reclaim_wait);
	init_waitqueue_head(&j->pin_flush_wait);
	mutex_init(&j->reclaim_lock);
	mutex_init(&j->discard_lock);

	lockdep_init_map(&j->res_map, "journal res", &res_key, 0);

	j->write_delay_ms = 1000;
	j->reclaim_delay_ms = 100;

	atomic64_set(&j->reservations.counter,
		((union journal_res_state)
		 { .cur_entry_offset = JOURNAL_ENTRY_CLOSED_VAL }).v);

	if (!(init_fifo(&j->pin, JOURNAL_PIN, GFP_KERNEL))) {
		ret = -ENOMEM;
		goto out;
	}

	for (i = 0; i < ARRAY_SIZE(j->buf); i++) {
		j->buf[i].buf_size = JOURNAL_ENTRY_SIZE_MIN;
		j->buf[i].data = kvpmalloc(j->buf[i].buf_size, GFP_KERNEL);
		if (!j->buf[i].data) {
			ret = -ENOMEM;
			goto out;
		}
	}

	j->pin.front = j->pin.back = 1;
out:
	pr_verbose_init(c->opts, "ret %i", ret);
	return ret;
}

/* debug: */

void __bch2_journal_debug_to_text(struct printbuf *out, struct journal *j)
{
	struct bch_fs *c = container_of(j, struct bch_fs, journal);
	union journal_res_state s;
	struct bch_dev *ca;
	unsigned i;

	rcu_read_lock();
	s = READ_ONCE(j->reservations);

	pr_buf(out,
	       "active journal entries:\t%llu\n"
	       "seq:\t\t\t%llu\n"
	       "last_seq:\t\t%llu\n"
	       "last_seq_ondisk:\t%llu\n"
	       "flushed_seq_ondisk:\t%llu\n"
	       "prereserved:\t\t%u/%u\n"
	       "each entry reserved:\t%u\n"
	       "nr flush writes:\t%llu\n"
	       "nr noflush writes:\t%llu\n"
	       "nr direct reclaim:\t%llu\n"
	       "nr background reclaim:\t%llu\n"
	       "current entry sectors:\t%u\n"
	       "current entry error:\t%u\n"
	       "current entry:\t\t",
	       fifo_used(&j->pin),
	       journal_cur_seq(j),
	       journal_last_seq(j),
	       j->last_seq_ondisk,
	       j->flushed_seq_ondisk,
	       j->prereserved.reserved,
	       j->prereserved.remaining,
	       j->entry_u64s_reserved,
	       j->nr_flush_writes,
	       j->nr_noflush_writes,
	       j->nr_direct_reclaim,
	       j->nr_background_reclaim,
	       j->cur_entry_sectors,
	       j->cur_entry_error);

	switch (s.cur_entry_offset) {
	case JOURNAL_ENTRY_ERROR_VAL:
		pr_buf(out, "error\n");
		break;
	case JOURNAL_ENTRY_CLOSED_VAL:
		pr_buf(out, "closed\n");
		break;
	default:
		pr_buf(out, "%u/%u\n",
		       s.cur_entry_offset,
		       j->cur_entry_u64s);
		break;
	}

	pr_buf(out,
	       "current entry:\t\tidx %u refcount %u\n",
	       s.idx, journal_state_count(s, s.idx));

	i = s.idx;
	while (i != s.unwritten_idx) {
		i = (i - 1) & JOURNAL_BUF_MASK;

		pr_buf(out, "unwritten entry:\tidx %u refcount %u sectors %u\n",
		       i, journal_state_count(s, i), j->buf[i].sectors);
	}

	pr_buf(out,
	       "need write:\t\t%i\n"
	       "replay done:\t\t%i\n",
	       test_bit(JOURNAL_NEED_WRITE, &j->flags),
	       test_bit(JOURNAL_REPLAY_DONE, &j->flags));

	pr_buf(out, "space:\n");
	pr_buf(out, "\tdiscarded\t%u:%u\n",
	       j->space[journal_space_discarded].next_entry,
	       j->space[journal_space_discarded].total);
	pr_buf(out, "\tclean ondisk\t%u:%u\n",
	       j->space[journal_space_clean_ondisk].next_entry,
	       j->space[journal_space_clean_ondisk].total);
	pr_buf(out, "\tclean\t\t%u:%u\n",
	       j->space[journal_space_clean].next_entry,
	       j->space[journal_space_clean].total);
	pr_buf(out, "\ttotal\t\t%u:%u\n",
	       j->space[journal_space_total].next_entry,
	       j->space[journal_space_total].total);

	for_each_member_device_rcu(ca, c, i,
				   &c->rw_devs[BCH_DATA_journal]) {
		struct journal_device *ja = &ca->journal;

		if (!test_bit(ca->dev_idx, c->rw_devs[BCH_DATA_journal].d))
			continue;

		if (!ja->nr)
			continue;

		pr_buf(out,
		       "dev %u:\n"
		       "\tnr\t\t%u\n"
		       "\tbucket size\t%u\n"
		       "\tavailable\t%u:%u\n"
		       "\tdiscard_idx\t%u\n"
		       "\tdirty_ondisk\t%u (seq %llu)\n"
		       "\tdirty_idx\t%u (seq %llu)\n"
		       "\tcur_idx\t\t%u (seq %llu)\n",
		       i, ja->nr, ca->mi.bucket_size,
		       bch2_journal_dev_buckets_available(j, ja, journal_space_discarded),
		       ja->sectors_free,
		       ja->discard_idx,
		       ja->dirty_idx_ondisk, ja->bucket_seq[ja->dirty_idx_ondisk],
		       ja->dirty_idx, ja->bucket_seq[ja->dirty_idx],
		       ja->cur_idx, ja->bucket_seq[ja->cur_idx]);
	}

	rcu_read_unlock();
}

void bch2_journal_debug_to_text(struct printbuf *out, struct journal *j)
{
	spin_lock(&j->lock);
	__bch2_journal_debug_to_text(out, j);
	spin_unlock(&j->lock);
}

void bch2_journal_pins_to_text(struct printbuf *out, struct journal *j)
{
	struct journal_entry_pin_list *pin_list;
	struct journal_entry_pin *pin;
	u64 i;

	spin_lock(&j->lock);
	fifo_for_each_entry_ptr(pin_list, &j->pin, i) {
		pr_buf(out, "%llu: count %u\n",
		       i, atomic_read(&pin_list->count));

		list_for_each_entry(pin, &pin_list->list, list)
			pr_buf(out, "\t%px %ps\n",
			       pin, pin->flush);

		if (!list_empty(&pin_list->flushed))
			pr_buf(out, "flushed:\n");

		list_for_each_entry(pin, &pin_list->flushed, list)
			pr_buf(out, "\t%px %ps\n",
			       pin, pin->flush);
	}
	spin_unlock(&j->lock);
}