#ifndef _BCACHE_BTREE_H
#define _BCACHE_BTREE_H

/*
 * THE BTREE:
 *
 * At a high level, bcache's btree is a relatively standard b+ tree. All keys
 * and pointers are in the leaves; interior nodes only have pointers to the
 * child nodes.
 *
 * In the interior nodes, a struct bkey always points to a child btree node, and
 * the key is the highest key in the child node - except that the highest key in
 * an interior node is always MAX_KEY. The size field refers to the size on disk
 * of the child node - this would allow us to have variable sized btree nodes
 * (handy for keeping the depth of the btree 1 by expanding just the root).
 *
 * Btree nodes are themselves log structured, but this is hidden fairly
 * thoroughly. Btree nodes on disk will in practice have extents that overlap
 * (because they were written at different times), but in memory we never have
 * overlapping extents - when we read in a btree node from disk, the first thing
 * we do is resort all the sets of keys with a mergesort, and in the same pass
 * we check for overlapping extents and adjust them appropriately.
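 *
 * For instance (an illustrative sketch with hypothetical extents, using the
 * KEY(inode, offset, size) notation where offset is the end of the extent):
 * if an older bset contains KEY(1, 8, 8), covering sectors 0..7 of inode 1,
 * and a newer bset contains KEY(1, 12, 8), covering sectors 4..11, the
 * mergesort keeps the newer extent intact and trims the older key to
 * KEY(1, 4, 4) so that nothing overlaps in memory.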
 *
 * struct btree_op is a central interface to the btree code. It's used for
 * specifying read vs. write locking, and the embedded closure is used for
 * waiting on IO or on reserve memory.
 *
 * BTREE CACHE:
 *
 * Btree nodes are cached in memory; traversing the btree might require reading
 * in btree nodes, which is handled mostly transparently.
 *
 * bch_btree_node_get() looks up a btree node in the cache and reads it in from
 * disk if necessary. This function is almost never called directly though - the
 * btree() macro is used to get a btree node, call some function on it, and
 * unlock the node after the function returns.
 *
 * The root is special cased - it's taken out of the cache's lru (thus pinning
 * it in memory), so we can find the root of the btree by just dereferencing a
 * pointer instead of looking it up in the cache. This makes locking a bit
 * tricky, since the root pointer is protected by the lock in the btree node it
 * points to - the btree_root() macro handles this.
 *
 * In various places we must be able to allocate memory for multiple btree nodes
 * in order to make forward progress. To do this we use the btree cache itself
 * as a reserve; if __get_free_pages() fails, we'll find a node in the btree
 * cache we can reuse. We can't allow more than one thread to be doing this at a
 * time, so there's a lock, implemented by a pointer to the btree_op closure -
 * this allows the btree_root() macro to implicitly release this lock.
 *
 * BTREE IO:
 *
 * Btree nodes never have to be explicitly read in; bch_btree_node_get() handles
 * this.
 *
 * For writing, we have two btree_write structs embedded in struct btree - one
 * write in flight, and one being set up, and we toggle between them.
 *
 * Writing is done with a single function - bch_btree_write() really serves two
 * different purposes and should be broken up into two different functions. When
 * passing now = false, it merely indicates that the node is now dirty - calling
 * it ensures that the dirty keys will be written at some point in the future.
 *
 * When passing now = true, bch_btree_write() causes a write to happen
 * "immediately" (if there was already a write in flight, it'll cause the write
 * to happen as soon as the previous write completes). It still returns
 * immediately, though - it takes a refcount on the closure in struct btree_op
 * you passed to it, so a closure_sync() later can be used to wait for the
 * write to complete.
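 *
 * A minimal sketch of that wait-for-write pattern, using a stack allocated
 * closure (b is a hypothetical btree node pointer; see the declaration of
 * bch_btree_node_write() below):
 *
 *	struct closure cl;
 *
 *	closure_init_stack(&cl);
 *	bch_btree_node_write(b, &cl);	// kick off the write
 *	closure_sync(&cl);		// block until it completes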
 *
 * This is handy because btree_split() and garbage collection can issue writes
 * in parallel, reducing the amount of time they have to hold write locks.
 *
 * LOCKING:
 *
 * When traversing the btree, we may need write locks starting at some level -
 * inserting a key into the btree will typically only require a write lock on
 * the leaf node.
 *
 * This is specified with the lock field in struct btree_op; lock = 0 means we
 * take write locks at level <= 0, i.e. only leaf nodes. bch_btree_node_get()
 * checks this field and returns the node with the appropriate lock held.
 *
 * If, after traversing the btree, the insertion code discovers it has to split
 * then it must restart from the root and take new locks - to do this it changes
 * the lock field and returns -EINTR, which causes the btree_root() macro to
 * loop.
 *
 * Handling cache misses requires a different mechanism for upgrading to a write
 * lock. We do cache lookups with only a read lock held, but if we get a cache
 * miss and we wish to insert this data into the cache, we have to insert a
 * placeholder key to detect races - otherwise, we could race with a write and
 * overwrite the data that was just written to the cache with stale data from
 * the backing device.
 *
 * For this we use a sequence number that write locks and unlocks increment - to
 * insert the check key, the lookup code unlocks the btree node, retakes it with
 * a write lock, and fails if the sequence number doesn't match.
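 *
 * A sketch of that check (illustrative; the real version lives in
 * bch_btree_insert_check_key() in btree.c):
 *
 *	unsigned long seq = b->seq;
 *
 *	rw_unlock(false, b);		// drop the read lock
 *	rw_lock(true, b, b->level);	// retake as a write lock; bumps b->seq
 *
 *	if (b->seq != seq + 1)		// another writer got in between
 *		return -EINTR;		// restart the traversal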
 */

#include "bset.h"
#include "debug.h"

struct btree_write {
	atomic_t *journal;

	/* If btree_split() frees a btree node, it writes a new pointer to that
	 * btree node indicating it was freed; it takes a refcount on
	 * c->prio_blocked because we can't write the gens until the new
	 * pointer is on disk. This allows btree_write_endio() to release the
	 * refcount that btree_split() took.
	 */
	int prio_blocked;
};

struct btree {
	/* Hottest entries first */
	struct hlist_node hash;

	/* Key/pointer for this btree node */
	BKEY_PADDED(key);

	/* Single bit - set when accessed, cleared by shrinker */
	unsigned long accessed;
	unsigned long seq;
	struct rw_semaphore lock;
	struct cache_set *c;
	struct btree *parent;

	struct mutex write_lock;

	unsigned long flags;
	uint16_t written;	/* would be nice to kill */
	uint8_t level;

	struct btree_keys keys;

	/* For outstanding btree writes, used as a lock - protects write_idx */
	struct closure io;
	struct semaphore io_mutex;

	struct list_head list;
	struct delayed_work work;

	struct btree_write writes[2];
	struct bio *bio;
};

#define BTREE_FLAG(flag)						\
static inline bool btree_node_ ## flag(struct btree *b)		\
{ return test_bit(BTREE_NODE_ ## flag, &b->flags); }			\
									\
static inline void set_btree_node_ ## flag(struct btree *b)		\
{ set_bit(BTREE_NODE_ ## flag, &b->flags); }

enum btree_flags {
	BTREE_NODE_io_error,
	BTREE_NODE_dirty,
	BTREE_NODE_write_idx,
};

BTREE_FLAG(io_error);
BTREE_FLAG(dirty);
BTREE_FLAG(write_idx);
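
/*
 * Each BTREE_FLAG() invocation above generates a pair of helpers; e.g.
 * BTREE_FLAG(dirty) expands to:
 *
 *	btree_node_dirty(b)	// test BTREE_NODE_dirty in b->flags
 *	set_btree_node_dirty(b)	// set it
 */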

/* The write currently being set up (and dirtied into) */
static inline struct btree_write *btree_current_write(struct btree *b)
{
	return b->writes + btree_node_write_idx(b);
}

/* The other slot: the last write submitted, possibly still in flight */
static inline struct btree_write *btree_prev_write(struct btree *b)
{
	return b->writes + (btree_node_write_idx(b) ^ 1);
}

static inline struct bset *btree_bset_first(struct btree *b)
{
	return b->keys.set->data;
}

static inline struct bset *btree_bset_last(struct btree *b)
{
	return bset_tree_last(&b->keys)->data;
}

static inline unsigned bset_block_offset(struct btree *b, struct bset *i)
{
	return bset_sector_offset(&b->keys, i) >> b->c->block_bits;
}

static inline void set_gc_sectors(struct cache_set *c)
{
	atomic_set(&c->sectors_to_gc, c->sb.bucket_size * c->nbuckets / 16);
}
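
/*
 * Example arithmetic (hypothetical numbers): with 1024-sector (512KiB)
 * buckets and 131072 buckets, sectors_to_gc is reset to
 * 1024 * 131072 / 16 = 8388608 sectors, so GC is woken after roughly 1/16th
 * of the cache (here 4GiB of a 64GiB cache) has been written.
 */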

void bkey_put(struct cache_set *c, struct bkey *k);

/* Looping macros */

#define for_each_cached_btree(b, c, iter)				\
	for (iter = 0;							\
	     iter < ARRAY_SIZE((c)->bucket_hash);			\
	     iter++)							\
		hlist_for_each_entry_rcu((b), (c)->bucket_hash + iter, hash)
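
/*
 * Usage sketch (hypothetical; the hash table is searched under RCU, so
 * iterate with rcu_read_lock() held):
 *
 *	struct btree *b;
 *	unsigned i;
 *
 *	rcu_read_lock();
 *	for_each_cached_btree(b, c, i)
 *		pr_info("cached btree node at level %u\n", b->level);
 *	rcu_read_unlock();
 */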

/* Recursing down the btree */

struct btree_op {
	/* For waiting on btree reserve in btree_split() */
	wait_queue_t wait;

	/* Btree level at which we start taking write locks */
	short lock;

	unsigned insert_collision:1;
};

static inline void bch_btree_op_init(struct btree_op *op, int write_lock_level)
{
	memset(op, 0, sizeof(struct btree_op));
	init_wait(&op->wait);
	op->lock = write_lock_level;
}
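
/*
 * Illustrative initializations (see the LOCKING comment above): lock == -1
 * takes no write locks at all (a read-only traversal), lock == 0 write locks
 * only leaf nodes (a plain insert), and a large value such as SHRT_MAX write
 * locks every level:
 *
 *	struct btree_op op;
 *
 *	bch_btree_op_init(&op, -1);		// read-only traversal
 *	bch_btree_op_init(&op, 0);		// insert into a leaf
 *	bch_btree_op_init(&op, SHRT_MAX);	// write lock everything
 */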

static inline void rw_lock(bool w, struct btree *b, int level)
{
	w ? down_write_nested(&b->lock, level + 1)
	  : down_read_nested(&b->lock, level + 1);
	if (w)
		b->seq++;
}

static inline void rw_unlock(bool w, struct btree *b)
{
	if (w)
		b->seq++;
	(w ? up_write : up_read)(&b->lock);
}
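
/*
 * These are used in pairs; note that b->seq is bumped on both write lock and
 * write unlock, which is what the sequence number race check described at
 * the top of this file relies on:
 *
 *	rw_lock(true, b, b->level);	// write lock, b->seq++
 *	// ... modify the node ...
 *	rw_unlock(true, b);		// b->seq++ again
 */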

void bch_btree_node_read_done(struct btree *);
void __bch_btree_node_write(struct btree *, struct closure *);
void bch_btree_node_write(struct btree *, struct closure *);

void bch_btree_set_root(struct btree *);
struct btree *bch_btree_node_alloc(struct cache_set *, struct btree_op *, int);
struct btree *bch_btree_node_get(struct cache_set *, struct btree_op *,
				 struct bkey *, int, bool);

int bch_btree_insert_check_key(struct btree *, struct btree_op *,
			       struct bkey *);
int bch_btree_insert(struct cache_set *, struct keylist *,
		     atomic_t *, struct bkey *);
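
/*
 * Insertion sketch (illustrative; passing a NULL journal ref and a NULL
 * replace key is assumed here to be the simplest case - no journal pin, no
 * compare-and-swap semantics):
 *
 *	struct keylist keys;
 *
 *	bch_keylist_init(&keys);
 *	// ... append keys, e.g. with bch_keylist_add() ...
 *	bch_btree_insert(c, &keys, NULL, NULL);
 */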

int bch_gc_thread_start(struct cache_set *);
void bch_initial_gc_finish(struct cache_set *);
void bch_moving_gc(struct cache_set *);
int bch_btree_check(struct cache_set *);
void bch_initial_mark_key(struct cache_set *, int, struct bkey *);

static inline void wake_up_gc(struct cache_set *c)
{
	if (c->gc_thread)
		wake_up_process(c->gc_thread);
}

#define MAP_DONE	0
#define MAP_CONTINUE	1

#define MAP_ALL_NODES	0
#define MAP_LEAF_NODES	1

#define MAP_END_KEY	1

typedef int (btree_map_nodes_fn)(struct btree_op *, struct btree *);
int __bch_btree_map_nodes(struct btree_op *, struct cache_set *,
			  struct bkey *, btree_map_nodes_fn *, int);

static inline int bch_btree_map_nodes(struct btree_op *op, struct cache_set *c,
				      struct bkey *from, btree_map_nodes_fn *fn)
{
	return __bch_btree_map_nodes(op, c, from, fn, MAP_ALL_NODES);
}
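
/*
 * Callback sketch: fn is called once per btree node and steers the walk via
 * the MAP_* return codes (count_fn and nodes_seen are hypothetical):
 *
 *	static int count_fn(struct btree_op *op, struct btree *b)
 *	{
 *		nodes_seen++;
 *		return MAP_CONTINUE;	// MAP_DONE would end the walk early
 *	}
 *
 *	struct btree_op op;
 *
 *	bch_btree_op_init(&op, -1);
 *	bch_btree_map_nodes(&op, c, &ZERO_KEY, count_fn);
 */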

static inline int bch_btree_map_leaf_nodes(struct btree_op *op,
					   struct cache_set *c,
					   struct bkey *from,
					   btree_map_nodes_fn *fn)
{
	return __bch_btree_map_nodes(op, c, from, fn, MAP_LEAF_NODES);
}

typedef int (btree_map_keys_fn)(struct btree_op *, struct btree *,
				struct bkey *);
int bch_btree_map_keys(struct btree_op *, struct cache_set *,
		       struct bkey *, btree_map_keys_fn *, int);

typedef bool (keybuf_pred_fn)(struct keybuf *, struct bkey *);

void bch_keybuf_init(struct keybuf *);
void bch_refill_keybuf(struct cache_set *, struct keybuf *,
		       struct bkey *, keybuf_pred_fn *);
bool bch_keybuf_check_overlapping(struct keybuf *, struct bkey *,
				  struct bkey *);
void bch_keybuf_del(struct keybuf *, struct keybuf_key *);
struct keybuf_key *bch_keybuf_next(struct keybuf *);
struct keybuf_key *bch_keybuf_next_rescan(struct cache_set *, struct keybuf *,
					  struct bkey *, keybuf_pred_fn *);
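
/*
 * Predicate sketch: a keybuf_pred_fn decides which keys a refill keeps. This
 * one (hypothetical, in the style of the writeback code) accepts only dirty
 * keys, then fills buf with everything dirty up to MAX_KEY:
 *
 *	static bool dirty_pred(struct keybuf *buf, struct bkey *k)
 *	{
 *		return KEY_DIRTY(k);
 *	}
 *
 *	bch_refill_keybuf(c, buf, &MAX_KEY, dirty_pred);
 */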

#endif