bcachefs: Ensure we wake up threads locking node when reusing it
[linux-block.git] / fs / bcachefs / btree_cache.h
CommitLineData
1c6fdbd8
KO
1/* SPDX-License-Identifier: GPL-2.0 */
2#ifndef _BCACHEFS_BTREE_CACHE_H
3#define _BCACHEFS_BTREE_CACHE_H
4
5#include "bcachefs.h"
6#include "btree_types.h"
1c6fdbd8
KO
7
struct btree_iter;

/* Printable names for enum btree_id values (table defined outside this header) */
extern const char * const bch2_btree_ids[];

void bch2_recalc_btree_reserve(struct bch_fs *);

/* Insert/remove btree nodes in the in-memory node cache's hash table */
void bch2_btree_node_hash_remove(struct btree_cache *, struct btree *);
int __bch2_btree_node_hash_insert(struct btree_cache *, struct btree *);
int bch2_btree_node_hash_insert(struct btree_cache *, struct btree *,
				unsigned, enum btree_id);

/*
 * NOTE(review): "cannibalize" presumably means stealing an in-use cached node
 * when a fresh allocation isn't possible — confirm against btree_cache.c.
 */
void bch2_btree_cache_cannibalize_unlock(struct bch_fs *);
int bch2_btree_cache_cannibalize_lock(struct bch_fs *, struct closure *);

struct btree *bch2_btree_node_mem_alloc(struct bch_fs *);

/* Look up (and read in, if necessary) btree nodes by pointer key: */
struct btree *bch2_btree_node_get(struct bch_fs *, struct btree_iter *,
				  const struct bkey_i *, unsigned,
				  enum six_lock_type);

struct btree *bch2_btree_node_get_noiter(struct bch_fs *, const struct bkey_i *,
					 enum btree_id, unsigned);

struct btree *bch2_btree_node_get_sibling(struct bch_fs *, struct btree_iter *,
					  struct btree *, enum btree_node_sibling);

void bch2_btree_node_prefetch(struct bch_fs *, struct btree_iter *,
			      const struct bkey_i *, unsigned);

/* Filesystem-lifetime setup/teardown of the btree node cache */
void bch2_fs_btree_cache_exit(struct bch_fs *);
int bch2_fs_btree_cache_init(struct bch_fs *);
void bch2_fs_btree_cache_init_early(struct btree_cache *);
237e8048
KO
41static inline u64 btree_ptr_hash_val(const struct bkey_i *k)
42{
43 switch (k->k.type) {
44 case KEY_TYPE_btree_ptr:
45 return *((u64 *) bkey_i_to_btree_ptr_c(k)->v.start);
548b3d20
KO
46 case KEY_TYPE_btree_ptr_v2:
47 return bkey_i_to_btree_ptr_v2_c(k)->v.seq;
237e8048
KO
48 default:
49 return 0;
50 }
51}
1c6fdbd8 52
72141e1f
KO
53static inline struct btree *btree_node_mem_ptr(const struct bkey_i *k)
54{
55 return k->k.type == KEY_TYPE_btree_ptr_v2
56 ? (void *)(unsigned long)bkey_i_to_btree_ptr_v2_c(k)->v.mem_ptr
57 : NULL;
58}
59
1c6fdbd8
KO
60/* is btree node in hash table? */
61static inline bool btree_node_hashed(struct btree *b)
62{
237e8048 63 return b->hash_val != 0;
1c6fdbd8
KO
64}
65
/*
 * Iterate over every btree node in the in-memory cache via its rhashtable.
 * Walks each bucket (_iter) of the current table (_tbl), yielding nodes in _b.
 * Uses rht_dereference_rcu()/rht_for_each_entry_rcu(), so callers presumably
 * must hold rcu_read_lock() — NOTE(review): confirm against call sites.
 */
#define for_each_cached_btree(_b, _c, _tbl, _iter, _pos)		\
	for ((_tbl) = rht_dereference_rcu((_c)->btree_cache.table.tbl,	\
					  &(_c)->btree_cache.table),	\
	     _iter = 0;	_iter < (_tbl)->size; _iter++)			\
		rht_for_each_entry_rcu((_b), (_pos), _tbl, _iter, hash)
71
72static inline size_t btree_bytes(struct bch_fs *c)
73{
74 return c->opts.btree_node_size << 9;
75}
76
77static inline size_t btree_max_u64s(struct bch_fs *c)
78{
79 return (btree_bytes(c) - sizeof(struct btree_node)) / sizeof(u64);
80}
81
82static inline size_t btree_page_order(struct bch_fs *c)
83{
84 return get_order(btree_bytes(c));
85}
86
87static inline size_t btree_pages(struct bch_fs *c)
88{
89 return 1 << btree_page_order(c);
90}
91
92static inline unsigned btree_blocks(struct bch_fs *c)
93{
94 return c->opts.btree_node_size >> c->block_bits;
95}
96
/* Split a node once it holds more than 2/3 of the maximum u64s */
#define BTREE_SPLIT_THRESHOLD(c)		(btree_max_u64s(c) * 2 / 3)

/*
 * Foreground merging kicks in below 1/3 full; the hysteresis value is the
 * merge threshold plus four times itself (i.e. 5x the threshold).
 */
#define BTREE_FOREGROUND_MERGE_THRESHOLD(c)	(btree_max_u64s(c) * 1 / 3)
#define BTREE_FOREGROUND_MERGE_HYSTERESIS(c)			\
	(BTREE_FOREGROUND_MERGE_THRESHOLD(c) +			\
	 (BTREE_FOREGROUND_MERGE_THRESHOLD(c) << 2))

/* Root node of the btree that node _b belongs to */
#define btree_node_root(_c, _b)	((_c)->btree_roots[(_b)->c.btree_id].b)
1c6fdbd8 105
319f9ac3
KO
/* Emit a textual description of a btree node into @printbuf (debug/introspection) */
void bch2_btree_node_to_text(struct printbuf *, struct bch_fs *,
			     struct btree *);

#endif /* _BCACHEFS_BTREE_CACHE_H */