Commit | Line | Data |
---|---|---|
1c6fdbd8 KO |
1 | // SPDX-License-Identifier: GPL-2.0 |
2 | /* | |
3 | * Copyright (C) 2010 Kent Overstreet <kent.overstreet@gmail.com> | |
4 | * Copyright (C) 2014 Datera Inc. | |
5 | */ | |
6 | ||
7 | #include "bcachefs.h" | |
7b3f84ea | 8 | #include "alloc_background.h" |
ef337c54 | 9 | #include "alloc_foreground.h" |
1c6fdbd8 | 10 | #include "bkey_methods.h" |
07a1006a | 11 | #include "bkey_buf.h" |
1c6fdbd8 KO |
12 | #include "btree_locking.h" |
13 | #include "btree_update_interior.h" | |
14 | #include "btree_io.h" | |
15 | #include "btree_gc.h" | |
16 | #include "buckets.h" | |
17 | #include "clock.h" | |
18 | #include "debug.h" | |
cd575ddf | 19 | #include "ec.h" |
1c6fdbd8 KO |
20 | #include "error.h" |
21 | #include "extents.h" | |
22 | #include "journal.h" | |
23 | #include "keylist.h" | |
24 | #include "move.h" | |
d0734356 | 25 | #include "recovery.h" |
1c6fdbd8 KO |
26 | #include "replicas.h" |
27 | #include "super-io.h" | |
28 | #include "trace.h" | |
29 | ||
30 | #include <linux/slab.h> | |
31 | #include <linux/bitops.h> | |
32 | #include <linux/freezer.h> | |
33 | #include <linux/kthread.h> | |
34 | #include <linux/preempt.h> | |
35 | #include <linux/rcupdate.h> | |
36 | #include <linux/sched/task.h> | |
37 | ||
2252aa27 KO |
/*
 * Update the current GC position, c->gc_pos, under the gc_pos_lock seqcount
 * so that readers (which retry on seqcount change) always see a consistent
 * position.  Preemption is disabled around the write section because a
 * seqcount writer must not be preempted while readers may spin on it.
 */
static inline void __gc_pos_set(struct bch_fs *c, struct gc_pos new_pos)
{
	preempt_disable();
	write_seqcount_begin(&c->gc_pos_lock);
	c->gc_pos = new_pos;
	write_seqcount_end(&c->gc_pos_lock);
	preempt_enable();
}
46 | ||
/*
 * Advance the GC position.  GC only ever moves forward; the BUG_ON enforces
 * that the new position strictly follows the current one before publishing
 * it via __gc_pos_set().
 */
static inline void gc_pos_set(struct bch_fs *c, struct gc_pos new_pos)
{
	BUG_ON(gc_pos_cmp(new_pos, c->gc_pos) <= 0);
	__gc_pos_set(c, new_pos);
}
52 | ||
a0b73c1c KO |
/*
 * Missing: if an interior btree node is empty, we need to do something -
 * perhaps just kill it
 */
/*
 * Verify that @cur (a child pointer in interior node @b) starts exactly where
 * the previous child (@prev) ended, and — when @is_last — that it ends at the
 * node's max_key.  On an fsck-confirmed mismatch the child key is repaired:
 * the fixed key is (re)inserted into the journal keys, and if the child node
 * is cached its key and header are rewritten in place.
 *
 * @prev is updated to @cur on return so the caller can iterate.
 * Returns 0 or a negative error code.
 */
static int bch2_gc_check_topology(struct bch_fs *c,
				  struct btree *b,
				  struct bkey_buf *prev,
				  struct bkey_buf cur,
				  bool is_last)
{
	struct bpos node_start	= b->data->min_key;
	struct bpos node_end	= b->data->max_key;
	/* where @cur must begin: right after @prev, or at node start */
	struct bpos expected_start = bkey_deleted(&prev->k->k)
		? node_start
		: bkey_successor(prev->k->k.p);
	char buf1[200], buf2[200];
	bool update_min = false;
	bool update_max = false;
	int ret = 0;

	/* min_key is only recorded in (and checkable for) v2 btree pointers: */
	if (cur.k->k.type == KEY_TYPE_btree_ptr_v2) {
		struct bkey_i_btree_ptr_v2 *bp = bkey_i_to_btree_ptr_v2(cur.k);

		if (bkey_deleted(&prev->k->k))
			scnprintf(buf1, sizeof(buf1), "start of node: %llu:%llu",
				  node_start.inode,
				  node_start.offset);
		else
			bch2_bkey_val_to_text(&PBUF(buf1), c, bkey_i_to_s_c(prev->k));

		if (fsck_err_on(bkey_cmp(expected_start, bp->v.min_key), c,
				"btree node with incorrect min_key at btree %s level %u:\n"
				"  prev %s\n"
				"  cur %s",
				bch2_btree_ids[b->c.btree_id], b->c.level,
				buf1,
				(bch2_bkey_val_to_text(&PBUF(buf2), c, bkey_i_to_s_c(cur.k)), buf2)))
			update_min = true;
	}

	if (fsck_err_on(is_last &&
			bkey_cmp(cur.k->k.p, node_end), c,
			"btree node with incorrect max_key at btree %s level %u:\n"
			"  %s\n"
			"  expected %s",
			bch2_btree_ids[b->c.btree_id], b->c.level,
			(bch2_bkey_val_to_text(&PBUF(buf1), c, bkey_i_to_s_c(cur.k)), buf1),
			(bch2_bpos_to_text(&PBUF(buf2), node_end), buf2)))
		update_max = true;

	/* remember this key as "prev" for the caller's next iteration: */
	bch2_bkey_buf_copy(prev, c, cur.k);

	if (update_min || update_max) {
		struct bkey_i *new;
		struct bkey_i_btree_ptr_v2 *bp = NULL;
		struct btree *n;

		/*
		 * Changing max_key changes the key's position, so the old
		 * journal key must be deleted before inserting the fixed one:
		 */
		if (update_max) {
			ret = bch2_journal_key_delete(c, b->c.btree_id,
						      b->c.level, cur.k->k.p);
			if (ret)
				return ret;
		}

		new = kmalloc(bkey_bytes(&cur.k->k), GFP_KERNEL);
		if (!new) {
			bch_err(c, "%s: error allocating new key", __func__);
			return -ENOMEM;
		}

		bkey_copy(new, cur.k);

		if (new->k.type == KEY_TYPE_btree_ptr_v2)
			bp = bkey_i_to_btree_ptr_v2(new);

		/* update_min can only be set for v2 pointers, so bp != NULL here */
		if (update_min)
			bp->v.min_key = expected_start;
		if (update_max)
			new->k.p = node_end;
		if (bp)
			SET_BTREE_PTR_RANGE_UPDATED(&bp->v, true);

		ret = bch2_journal_key_insert(c, b->c.btree_id, b->c.level, new);
		if (ret) {
			kfree(new);
			return ret;
		}

		/*
		 * If the child node is in the btree node cache, its key is
		 * part of the hash lookup — rekey it under the cache lock:
		 */
		n = bch2_btree_node_get_noiter(c, cur.k, b->c.btree_id,
					       b->c.level - 1, true);
		if (n) {
			mutex_lock(&c->btree_cache.lock);
			bch2_btree_node_hash_remove(&c->btree_cache, n);

			bkey_copy(&n->key, new);
			if (update_min)
				n->data->min_key = expected_start;
			if (update_max)
				n->data->max_key = node_end;

			ret = __bch2_btree_node_hash_insert(&c->btree_cache, n);
			BUG_ON(ret);
			mutex_unlock(&c->btree_cache.lock);
			six_unlock_read(&n->c.lock);
		}
	}
fsck_err:
	return ret;
}
162 | ||
5fc70d3a KO |
/*
 * Cross-check each pointer in @k against the in-memory (GC) bucket state and
 * repair inconsistencies found during initial GC/fsck:
 *
 *  - dirty pointers with bad/missing gens: trust the pointer, fix the bucket
 *    (both the GC copy and the live copy) and schedule an alloc info write;
 *  - cached pointers with bad gens, stale dirty pointers, and pointers to
 *    dead stripes: drop them from the key.
 *
 * When pointers must be dropped, a fixed-up copy of the key is inserted into
 * the journal keys and *k is repointed at it.  Returns 0 or -ve error.
 */
static int bch2_check_fix_ptrs(struct bch_fs *c, enum btree_id btree_id,
			       unsigned level, bool is_root,
			       struct bkey_s_c *k)
{
	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(*k);
	const union bch_extent_entry *entry;
	struct extent_ptr_decoded p;
	bool do_update = false;
	int ret = 0;

	bkey_for_each_ptr_decode(k->k, ptrs, p, entry) {
		struct bch_dev *ca = bch_dev_bkey_exists(c, p.ptr.dev);
		/* g = GC's shadow bucket, g2 = the live bucket: */
		struct bucket *g = PTR_BUCKET(ca, &p.ptr, true);
		struct bucket *g2 = PTR_BUCKET(ca, &p.ptr, false);

		if (fsck_err_on(!g->gen_valid, c,
				"bucket %u:%zu data type %s ptr gen %u missing in alloc btree",
				p.ptr.dev, PTR_BUCKET_NR(ca, &p.ptr),
				bch2_data_types[ptr_data_type(k->k, &p.ptr)],
				p.ptr.gen)) {
			if (!p.ptr.cached) {
				/* dirty pointer: take its gen as authoritative */
				g2->_mark.gen	= g->_mark.gen	= p.ptr.gen;
				g2->gen_valid	= g->gen_valid	= true;
				set_bit(BCH_FS_NEED_ALLOC_WRITE, &c->flags);
			} else {
				do_update = true;
			}
		}

		if (fsck_err_on(gen_cmp(p.ptr.gen, g->mark.gen) > 0, c,
				"bucket %u:%zu data type %s ptr gen in the future: %u > %u",
				p.ptr.dev, PTR_BUCKET_NR(ca, &p.ptr),
				bch2_data_types[ptr_data_type(k->k, &p.ptr)],
				p.ptr.gen, g->mark.gen)) {
			if (!p.ptr.cached) {
				/*
				 * Bumping the bucket gen invalidates whatever
				 * was previously accounted to it — zero the
				 * counts and rerun GC to re-mark:
				 */
				g2->_mark.gen	= g->_mark.gen	= p.ptr.gen;
				g2->gen_valid	= g->gen_valid	= true;
				g2->_mark.data_type		= 0;
				g2->_mark.dirty_sectors		= 0;
				g2->_mark.cached_sectors	= 0;
				set_bit(BCH_FS_NEED_ANOTHER_GC, &c->flags);
				set_bit(BCH_FS_NEED_ALLOC_WRITE, &c->flags);
			} else {
				do_update = true;
			}
		}

		if (fsck_err_on(!p.ptr.cached &&
				gen_cmp(p.ptr.gen, g->mark.gen) < 0, c,
				"bucket %u:%zu data type %s stale dirty ptr: %u < %u",
				p.ptr.dev, PTR_BUCKET_NR(ca, &p.ptr),
				bch2_data_types[ptr_data_type(k->k, &p.ptr)],
				p.ptr.gen, g->mark.gen))
			do_update = true;

		if (p.has_ec) {
			struct stripe *m = genradix_ptr(&c->stripes[true], p.ec.idx);

			if (fsck_err_on(!m || !m->alive, c,
					"pointer to nonexistent stripe %llu",
					(u64) p.ec.idx))
				do_update = true;
		}
	}

	if (do_update) {
		struct bkey_ptrs ptrs;
		union bch_extent_entry *entry;
		struct bch_extent_ptr *ptr;
		struct bkey_i *new;

		if (is_root) {
			bch_err(c, "cannot update btree roots yet");
			return -EINVAL;
		}

		new = kmalloc(bkey_bytes(k->k), GFP_KERNEL);
		if (!new) {
			bch_err(c, "%s: error allocating new key", __func__);
			return -ENOMEM;
		}

		bkey_reassemble(new, *k);

		/* drop the pointers flagged as bad above: */
		bch2_bkey_drop_ptrs(bkey_i_to_s(new), ptr, ({
			struct bch_dev *ca = bch_dev_bkey_exists(c, ptr->dev);
			struct bucket *g = PTR_BUCKET(ca, ptr, true);

			(ptr->cached &&
			 (!g->gen_valid || gen_cmp(ptr->gen, g->mark.gen) > 0)) ||
			(!ptr->cached &&
			 gen_cmp(ptr->gen, g->mark.gen) < 0);
		}));
again:
		/* restart after each drop — the entry list was shifted: */
		ptrs = bch2_bkey_ptrs(bkey_i_to_s(new));
		bkey_extent_entry_for_each(ptrs, entry) {
			if (extent_entry_type(entry) == BCH_EXTENT_ENTRY_stripe_ptr) {
				struct stripe *m = genradix_ptr(&c->stripes[true],
								entry->stripe_ptr.idx);

				if (!m || !m->alive) {
					bch2_bkey_extent_entry_drop(new, entry);
					goto again;
				}
			}
		}

		/* on success the journal keys own @new; repoint the caller at it */
		ret = bch2_journal_key_insert(c, btree_id, level, new);
		if (ret)
			kfree(new);
		else
			*k = bkey_i_to_s_c(new);
	}
fsck_err:
	return ret;
}
279 | ||
2252aa27 KO |
280 | /* marking of btree keys/nodes: */ |
281 | ||
5fc70d3a KO |
282 | static int bch2_gc_mark_key(struct bch_fs *c, enum btree_id btree_id, |
283 | unsigned level, bool is_root, | |
284 | struct bkey_s_c k, | |
d034c09b | 285 | u8 *max_stale, bool initial) |
2252aa27 | 286 | { |
26609b61 KO |
287 | struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k); |
288 | const struct bch_extent_ptr *ptr; | |
47799326 | 289 | unsigned flags = |
2d594dfb KO |
290 | BTREE_TRIGGER_GC| |
291 | (initial ? BTREE_TRIGGER_NOATOMIC : 0); | |
2252aa27 KO |
292 | int ret = 0; |
293 | ||
91f8b567 | 294 | if (initial) { |
29364f34 | 295 | BUG_ON(bch2_journal_seq_verify && |
91f8b567 KO |
296 | k.k->version.lo > journal_cur_seq(&c->journal)); |
297 | ||
a9bc0a51 KO |
298 | if (fsck_err_on(k.k->version.lo > atomic64_read(&c->key_version), c, |
299 | "key version number higher than recorded: %llu > %llu", | |
300 | k.k->version.lo, | |
301 | atomic64_read(&c->key_version))) | |
91f8b567 KO |
302 | atomic64_set(&c->key_version, k.k->version.lo); |
303 | ||
304 | if (test_bit(BCH_FS_REBUILD_REPLICAS, &c->flags) || | |
988e98cf | 305 | fsck_err_on(!bch2_bkey_replicas_marked(c, k), c, |
91f8b567 | 306 | "superblock not marked as containing replicas (type %u)", |
26609b61 KO |
307 | k.k->type)) { |
308 | ret = bch2_mark_bkey_replicas(c, k); | |
dab9ef0d KO |
309 | if (ret) { |
310 | bch_err(c, "error marking bkey replicas: %i", ret); | |
311 | goto err; | |
312 | } | |
2252aa27 | 313 | } |
47799326 | 314 | |
5fc70d3a | 315 | ret = bch2_check_fix_ptrs(c, btree_id, level, is_root, &k); |
2252aa27 KO |
316 | } |
317 | ||
26609b61 KO |
318 | bkey_for_each_ptr(ptrs, ptr) { |
319 | struct bch_dev *ca = bch_dev_bkey_exists(c, ptr->dev); | |
39fbc5a4 | 320 | struct bucket *g = PTR_BUCKET(ca, ptr, true); |
26609b61 | 321 | |
76f4c7b0 KO |
322 | if (gen_after(g->oldest_gen, ptr->gen)) |
323 | g->oldest_gen = ptr->gen; | |
26609b61 KO |
324 | |
325 | *max_stale = max(*max_stale, ptr_stale(ca, ptr)); | |
326 | } | |
91f8b567 | 327 | |
2cbe5cfe | 328 | bch2_mark_key(c, k, 0, k.k->size, NULL, 0, flags); |
91f8b567 | 329 | fsck_err: |
dab9ef0d KO |
330 | err: |
331 | if (ret) | |
332 | bch_err(c, "%s: ret %i", __func__, ret); | |
2252aa27 KO |
333 | return ret; |
334 | } | |
335 | ||
/*
 * Mark every key in btree node @b, returning the maximum pointer staleness
 * seen via @max_stale.  For interior nodes, also verify child-pointer
 * topology (contiguity of min/max keys) via bch2_gc_check_topology().
 *
 * Returns 0, or the first error from marking/topology checking.
 */
static int btree_gc_mark_node(struct bch_fs *c, struct btree *b, u8 *max_stale,
			      bool initial)
{
	struct btree_node_iter iter;
	struct bkey unpacked;
	struct bkey_s_c k;
	struct bkey_buf prev, cur;
	int ret = 0;

	*max_stale = 0;

	/* nodes whose key types carry no allocation info need no marking: */
	if (!btree_node_type_needs_gc(btree_node_type(b)))
		return 0;

	bch2_btree_node_iter_init_from_start(&iter, b);
	bch2_bkey_buf_init(&prev);
	bch2_bkey_buf_init(&cur);
	bkey_init(&prev.k->k);	/* deleted: signals "no previous key" */

	while ((k = bch2_btree_node_iter_peek_unpack(&iter, b, &unpacked)).k) {
		bch2_bkey_debugcheck(c, b, k);

		ret = bch2_gc_mark_key(c, b->c.btree_id, b->c.level, false,
				       k, max_stale, initial);
		if (ret)
			break;

		bch2_btree_node_iter_advance(&iter, b);

		if (b->c.level) {
			bch2_bkey_buf_reassemble(&cur, c, k);

			ret = bch2_gc_check_topology(c, b, &prev, cur,
					bch2_btree_node_iter_end(&iter));
			if (ret)
				break;
		}
	}

	bch2_bkey_buf_exit(&cur, c);
	bch2_bkey_buf_exit(&prev, c);
	return ret;
}
379 | ||
/*
 * Walk one btree (runtime GC path), marking every node and finally the root.
 * When not the initial pass, nodes with many stale pointers are opportunistically
 * rewritten to reclaim space.
 *
 * @initial: true during recovery/fsck (affects marking flags and rewrites).
 * Returns 0 or a negative error code.
 */
static int bch2_gc_btree(struct bch_fs *c, enum btree_id btree_id,
			 bool initial)
{
	struct btree_trans trans;
	struct btree_iter *iter;
	struct btree *b;
	/* leaf-only walk unless this btree's keys need GC or debug checks are on: */
	unsigned depth = bch2_expensive_debug_checks ? 0
		: !btree_node_type_needs_gc(btree_id)	? 1
		: 0;
	u8 max_stale = 0;
	int ret = 0;

	bch2_trans_init(&trans, c, 0, 0);

	gc_pos_set(c, gc_pos_btree(btree_id, POS_MIN, 0));

	__for_each_btree_node(&trans, iter, btree_id, POS_MIN,
			      0, depth, BTREE_ITER_PREFETCH, b) {
		bch2_verify_btree_nr_keys(b);

		/* publish progress so concurrent marking knows what GC has seen */
		gc_pos_set(c, gc_pos_btree_node(b));

		ret = btree_gc_mark_node(c, b, &max_stale, initial);
		if (ret)
			break;

		if (!initial) {
			/* rewrite very stale nodes (ignoring rewrite knobs at > 64): */
			if (max_stale > 64)
				bch2_btree_node_rewrite(c, iter,
						b->data->keys.seq,
						BTREE_INSERT_NOWAIT|
						BTREE_INSERT_GC_LOCK_HELD);
			else if (!bch2_btree_gc_rewrite_disabled &&
				 (bch2_btree_gc_always_rewrite || max_stale > 16))
				bch2_btree_node_rewrite(c, iter,
						b->data->keys.seq,
						BTREE_INSERT_NOWAIT|
						BTREE_INSERT_GC_LOCK_HELD);
		}

		bch2_trans_cond_resched(&trans);
	}
	ret = bch2_trans_exit(&trans) ?: ret;
	if (ret)
		return ret;

	/* the root isn't visited by the node walk — mark it separately: */
	mutex_lock(&c->btree_root_lock);
	b = c->btree_roots[btree_id].b;
	if (!btree_node_fake(b))
		ret = bch2_gc_mark_key(c, b->c.btree_id, b->c.level, true,
				       bkey_i_to_s_c(&b->key),
				       &max_stale, initial);
	gc_pos_set(c, gc_pos_btree_root(b->c.btree_id));
	mutex_unlock(&c->btree_root_lock);

	return ret;
}
437 | ||
e62d65f2 | 438 | static int bch2_gc_btree_init_recurse(struct bch_fs *c, struct btree *b, |
d06c1a0c | 439 | unsigned target_depth) |
e62d65f2 KO |
440 | { |
441 | struct btree_and_journal_iter iter; | |
442 | struct bkey_s_c k; | |
a66f7989 | 443 | struct bkey_buf cur, prev; |
e62d65f2 KO |
444 | u8 max_stale = 0; |
445 | int ret = 0; | |
446 | ||
5b593ee1 | 447 | bch2_btree_and_journal_iter_init_node_iter(&iter, c, b); |
a66f7989 KO |
448 | bch2_bkey_buf_init(&prev); |
449 | bch2_bkey_buf_init(&cur); | |
450 | bkey_init(&prev.k->k); | |
e62d65f2 KO |
451 | |
452 | while ((k = bch2_btree_and_journal_iter_peek(&iter)).k) { | |
453 | bch2_bkey_debugcheck(c, b, k); | |
454 | ||
d06c1a0c KO |
455 | BUG_ON(bkey_cmp(k.k->p, b->data->min_key) < 0); |
456 | BUG_ON(bkey_cmp(k.k->p, b->data->max_key) > 0); | |
457 | ||
5fc70d3a KO |
458 | ret = bch2_gc_mark_key(c, b->c.btree_id, b->c.level, false, |
459 | k, &max_stale, true); | |
dab9ef0d KO |
460 | if (ret) { |
461 | bch_err(c, "%s: error %i from bch2_gc_mark_key", __func__, ret); | |
e62d65f2 | 462 | break; |
dab9ef0d | 463 | } |
e62d65f2 | 464 | |
d06c1a0c | 465 | if (b->c.level) { |
a66f7989 KO |
466 | bch2_bkey_buf_reassemble(&cur, c, k); |
467 | k = bkey_i_to_s_c(cur.k); | |
d06c1a0c KO |
468 | |
469 | bch2_btree_and_journal_iter_advance(&iter); | |
e62d65f2 | 470 | |
a66f7989 KO |
471 | ret = bch2_gc_check_topology(c, b, |
472 | &prev, cur, | |
d06c1a0c | 473 | !bch2_btree_and_journal_iter_peek(&iter).k); |
e62d65f2 KO |
474 | if (ret) |
475 | break; | |
a0b73c1c KO |
476 | } else { |
477 | bch2_btree_and_journal_iter_advance(&iter); | |
478 | } | |
479 | } | |
e62d65f2 | 480 | |
a0b73c1c KO |
481 | if (b->c.level > target_depth) { |
482 | bch2_btree_and_journal_iter_exit(&iter); | |
483 | bch2_btree_and_journal_iter_init_node_iter(&iter, c, b); | |
484 | ||
485 | while ((k = bch2_btree_and_journal_iter_peek(&iter)).k) { | |
486 | struct btree *child; | |
487 | ||
488 | bch2_bkey_buf_reassemble(&cur, c, k); | |
489 | bch2_btree_and_journal_iter_advance(&iter); | |
e62d65f2 | 490 | |
a0b73c1c KO |
491 | child = bch2_btree_node_get_noiter(c, cur.k, |
492 | b->c.btree_id, b->c.level - 1, | |
493 | false); | |
494 | ret = PTR_ERR_OR_ZERO(child); | |
d06c1a0c | 495 | |
a0b73c1c KO |
496 | if (fsck_err_on(ret == -EIO, c, |
497 | "unreadable btree node")) { | |
498 | ret = bch2_journal_key_delete(c, b->c.btree_id, | |
499 | b->c.level, cur.k->k.p); | |
d06c1a0c | 500 | if (ret) |
a0b73c1c KO |
501 | return ret; |
502 | ||
503 | set_bit(BCH_FS_NEED_ANOTHER_GC, &c->flags); | |
504 | continue; | |
d06c1a0c | 505 | } |
a0b73c1c | 506 | |
dab9ef0d KO |
507 | if (ret) { |
508 | bch_err(c, "%s: error %i getting btree node", | |
509 | __func__, ret); | |
a0b73c1c | 510 | break; |
dab9ef0d | 511 | } |
a0b73c1c KO |
512 | |
513 | ret = bch2_gc_btree_init_recurse(c, child, | |
514 | target_depth); | |
515 | six_unlock_read(&child->c.lock); | |
516 | ||
517 | if (ret) | |
518 | break; | |
d06c1a0c | 519 | } |
e62d65f2 | 520 | } |
a0b73c1c | 521 | fsck_err: |
a66f7989 KO |
522 | bch2_bkey_buf_exit(&cur, c); |
523 | bch2_bkey_buf_exit(&prev, c); | |
5b593ee1 | 524 | bch2_btree_and_journal_iter_exit(&iter); |
e62d65f2 KO |
525 | return ret; |
526 | } | |
527 | ||
528 | static int bch2_gc_btree_init(struct bch_fs *c, | |
079663d8 | 529 | enum btree_id btree_id) |
e62d65f2 KO |
530 | { |
531 | struct btree *b; | |
079663d8 KO |
532 | unsigned target_depth = bch2_expensive_debug_checks ? 0 |
533 | : !btree_node_type_needs_gc(btree_id) ? 1 | |
e62d65f2 KO |
534 | : 0; |
535 | u8 max_stale = 0; | |
536 | int ret = 0; | |
537 | ||
538 | b = c->btree_roots[btree_id].b; | |
539 | ||
540 | if (btree_node_fake(b)) | |
541 | return 0; | |
542 | ||
543 | six_lock_read(&b->c.lock, NULL, NULL); | |
d06c1a0c KO |
544 | if (fsck_err_on(bkey_cmp(b->data->min_key, POS_MIN), c, |
545 | "btree root with incorrect min_key: %llu:%llu", | |
546 | b->data->min_key.inode, | |
547 | b->data->min_key.offset)) { | |
548 | BUG(); | |
549 | } | |
550 | ||
551 | if (fsck_err_on(bkey_cmp(b->data->max_key, POS_MAX), c, | |
552 | "btree root with incorrect min_key: %llu:%llu", | |
553 | b->data->max_key.inode, | |
554 | b->data->max_key.offset)) { | |
555 | BUG(); | |
556 | } | |
557 | ||
e62d65f2 | 558 | if (b->c.level >= target_depth) |
5b593ee1 | 559 | ret = bch2_gc_btree_init_recurse(c, b, target_depth); |
e62d65f2 KO |
560 | |
561 | if (!ret) | |
5fc70d3a KO |
562 | ret = bch2_gc_mark_key(c, b->c.btree_id, b->c.level, true, |
563 | bkey_i_to_s_c(&b->key), | |
e62d65f2 | 564 | &max_stale, true); |
d06c1a0c | 565 | fsck_err: |
e62d65f2 KO |
566 | six_unlock_read(&b->c.lock); |
567 | ||
dab9ef0d KO |
568 | if (ret) |
569 | bch_err(c, "%s: ret %i", __func__, ret); | |
e62d65f2 KO |
570 | return ret; |
571 | } | |
572 | ||
cd575ddf KO |
573 | static inline int btree_id_gc_phase_cmp(enum btree_id l, enum btree_id r) |
574 | { | |
575 | return (int) btree_id_to_gc_phase(l) - | |
576 | (int) btree_id_to_gc_phase(r); | |
577 | } | |
578 | ||
5b593ee1 | 579 | static int bch2_gc_btrees(struct bch_fs *c, bool initial) |
2252aa27 | 580 | { |
cd575ddf | 581 | enum btree_id ids[BTREE_ID_NR]; |
2252aa27 KO |
582 | unsigned i; |
583 | ||
cd575ddf KO |
584 | for (i = 0; i < BTREE_ID_NR; i++) |
585 | ids[i] = i; | |
586 | bubble_sort(ids, BTREE_ID_NR, btree_id_gc_phase_cmp); | |
587 | ||
2252aa27 | 588 | for (i = 0; i < BTREE_ID_NR; i++) { |
cd575ddf | 589 | enum btree_id id = ids[i]; |
e62d65f2 | 590 | int ret = initial |
5b593ee1 | 591 | ? bch2_gc_btree_init(c, id) |
079663d8 | 592 | : bch2_gc_btree(c, id, initial); |
dab9ef0d KO |
593 | if (ret) { |
594 | bch_err(c, "%s: ret %i", __func__, ret); | |
2252aa27 | 595 | return ret; |
dab9ef0d | 596 | } |
2252aa27 KO |
597 | } |
598 | ||
599 | return 0; | |
600 | } | |
601 | ||
1c6fdbd8 KO |
602 | static void mark_metadata_sectors(struct bch_fs *c, struct bch_dev *ca, |
603 | u64 start, u64 end, | |
604 | enum bch_data_type type, | |
605 | unsigned flags) | |
606 | { | |
607 | u64 b = sector_to_bucket(ca, start); | |
608 | ||
609 | do { | |
610 | unsigned sectors = | |
611 | min_t(u64, bucket_to_sector(ca, b + 1), end) - start; | |
612 | ||
613 | bch2_mark_metadata_bucket(c, ca, b, type, sectors, | |
614 | gc_phase(GC_PHASE_SB), flags); | |
615 | b++; | |
616 | start += sectors; | |
617 | } while (start < end); | |
618 | } | |
619 | ||
/*
 * Mark all of device @ca's superblock copies and journal buckets as metadata.
 *
 * @c may be NULL: in that case no filesystem locking is done (device-add
 * path, before the device is attached to a running filesystem).
 */
void bch2_mark_dev_superblock(struct bch_fs *c, struct bch_dev *ca,
			      unsigned flags)
{
	struct bch_sb_layout *layout = &ca->disk_sb.sb->layout;
	unsigned i;
	u64 b;

	/*
	 * This conditional is kind of gross, but we may be called from the
	 * device add path, before the new device has actually been added to the
	 * running filesystem:
	 */
	if (c) {
		lockdep_assert_held(&c->sb_lock);
		percpu_down_read(&c->mark_lock);
	}

	for (i = 0; i < layout->nr_superblocks; i++) {
		u64 offset = le64_to_cpu(layout->sb_offset[i]);

		/* the primary sb also implies everything before it is reserved: */
		if (offset == BCH_SB_SECTOR)
			mark_metadata_sectors(c, ca, 0, BCH_SB_SECTOR,
					      BCH_DATA_sb, flags);

		mark_metadata_sectors(c, ca, offset,
				      offset + (1 << layout->sb_max_size_bits),
				      BCH_DATA_sb, flags);
	}

	for (i = 0; i < ca->journal.nr; i++) {
		b = ca->journal.buckets[i];
		bch2_mark_metadata_bucket(c, ca, b, BCH_DATA_journal,
					  ca->mi.bucket_size,
					  gc_phase(GC_PHASE_SB), flags);
	}

	if (c)
		percpu_up_read(&c->mark_lock);
}
659 | ||
660 | static void bch2_mark_superblocks(struct bch_fs *c) | |
661 | { | |
662 | struct bch_dev *ca; | |
663 | unsigned i; | |
664 | ||
665 | mutex_lock(&c->sb_lock); | |
666 | gc_pos_set(c, gc_phase(GC_PHASE_SB)); | |
667 | ||
668 | for_each_online_member(ca, c, i) | |
2d594dfb | 669 | bch2_mark_dev_superblock(c, ca, BTREE_TRIGGER_GC); |
1c6fdbd8 KO |
670 | mutex_unlock(&c->sb_lock); |
671 | } | |
672 | ||
#if 0
/*
 * NOTE(review): compiled out — kept for reference only.  Marked the keys of
 * btree nodes whose frees were pending at GC time; presumably superseded by
 * newer interior-update accounting — confirm before resurrecting.
 */
/* Also see bch2_pending_btree_node_free_insert_done() */
static void bch2_mark_pending_btree_node_frees(struct bch_fs *c)
{
	struct btree_update *as;
	struct pending_btree_node_free *d;

	mutex_lock(&c->btree_interior_update_lock);
	gc_pos_set(c, gc_phase(GC_PHASE_PENDING_DELETE));

	for_each_pending_btree_node_free(c, as, d)
		if (d->index_update_done)
			bch2_mark_key(c, bkey_i_to_s_c(&d->key),
				      0, 0, NULL, 0,
				      BTREE_TRIGGER_GC);

	mutex_unlock(&c->btree_interior_update_lock);
}
#endif
1c6fdbd8 KO |
692 | |
/*
 * GC phase: mark buckets currently owned by the allocator — the free_inc and
 * reserve FIFOs of each device, plus all valid open buckets — so GC doesn't
 * see them as leaked.  gc_pos is advanced before each group so concurrent
 * allocation updates know whether GC has already passed them.
 */
static void bch2_mark_allocator_buckets(struct bch_fs *c)
{
	struct bch_dev *ca;
	struct open_bucket *ob;
	size_t i, j, iter;
	unsigned ci;

	percpu_down_read(&c->mark_lock);

	spin_lock(&c->freelist_lock);
	gc_pos_set(c, gc_pos_alloc(c, NULL));

	for_each_member_device(ca, c, ci) {
		fifo_for_each_entry(i, &ca->free_inc, iter)
			bch2_mark_alloc_bucket(c, ca, i, true,
					       gc_pos_alloc(c, NULL),
					       BTREE_TRIGGER_GC);



		for (j = 0; j < RESERVE_NR; j++)
			fifo_for_each_entry(i, &ca->free[j], iter)
				bch2_mark_alloc_bucket(c, ca, i, true,
						       gc_pos_alloc(c, NULL),
						       BTREE_TRIGGER_GC);
	}

	spin_unlock(&c->freelist_lock);

	/* open buckets are protected by their own per-bucket locks: */
	for (ob = c->open_buckets;
	     ob < c->open_buckets + ARRAY_SIZE(c->open_buckets);
	     ob++) {
		spin_lock(&ob->lock);
		if (ob->valid) {
			gc_pos_set(c, gc_pos_alloc(c, ob));
			ca = bch_dev_bkey_exists(c, ob->ptr.dev);
			bch2_mark_alloc_bucket(c, ca, PTR_BUCKET_NR(ca, &ob->ptr), true,
					       gc_pos_alloc(c, ob),
					       BTREE_TRIGGER_GC);
		}
		spin_unlock(&ob->lock);
	}

	percpu_up_read(&c->mark_lock);
}
738 | ||
9ca53b55 KO |
/*
 * Free all of GC's shadow state: the GC copy of the stripes radix tree,
 * each device's GC bucket array and per-cpu usage counters, and the
 * filesystem-wide GC usage counters.  Pointers are NULLed so a subsequent
 * GC start reallocates cleanly.
 */
static void bch2_gc_free(struct bch_fs *c)
{
	struct bch_dev *ca;
	unsigned i;

	genradix_free(&c->stripes[1]);

	for_each_member_device(ca, c, i) {
		kvpfree(rcu_dereference_protected(ca->buckets[1], 1),
			sizeof(struct bucket_array) +
			ca->mi.nbuckets * sizeof(struct bucket));
		ca->buckets[1] = NULL;

		free_percpu(ca->usage_gc);
		ca->usage_gc = NULL;
	}

	free_percpu(c->usage_gc);
	c->usage_gc = NULL;
}
759 | ||
/*
 * Reconcile GC's freshly-computed shadow state (index [1] / usage_gc) with
 * the live state (index [0] / usage_base): stripes, per-bucket marks,
 * per-device usage, and filesystem-wide usage.  Mismatches are reported via
 * fsck_err (only when @verify) and the live copy is overwritten with GC's
 * value; any correction schedules an alloc info write.
 *
 * Returns 0, or -ve error (inconsistent stripe shape, or fsck_err abort).
 */
static int bch2_gc_done(struct bch_fs *c,
			bool initial)
{
	struct bch_dev *ca;
	/* only complain on the initial pass if on-disk alloc info claimed validity: */
	bool verify = (!initial ||
		       (c->sb.compat & (1ULL << BCH_COMPAT_FEAT_ALLOC_INFO)));
	unsigned i, dev;
	int ret = 0;

/* compare one field of dst/src; report (if verifying) and copy on mismatch: */
#define copy_field(_f, _msg, ...)					\
	if (dst->_f != src->_f) {					\
		if (verify)						\
			fsck_err(c, _msg ": got %llu, should be %llu"	\
				, ##__VA_ARGS__, dst->_f, src->_f);	\
		dst->_f = src->_f;					\
		set_bit(BCH_FS_NEED_ALLOC_WRITE, &c->flags);		\
	}
#define copy_stripe_field(_f, _msg, ...)				\
	if (dst->_f != src->_f) {					\
		if (verify)						\
			fsck_err(c, "stripe %zu has wrong "_msg		\
				": got %u, should be %u",		\
				iter.pos, ##__VA_ARGS__,		\
				dst->_f, src->_f);			\
		dst->_f = src->_f;					\
		set_bit(BCH_FS_NEED_ALLOC_WRITE, &c->flags);		\
	}
#define copy_bucket_field(_f)						\
	if (dst->b[b].mark._f != src->b[b].mark._f) {			\
		if (verify)						\
			fsck_err(c, "bucket %u:%zu gen %u data type %s has wrong " #_f	\
				": got %u, should be %u", i, b,		\
				dst->b[b].mark.gen,			\
				bch2_data_types[dst->b[b].mark.data_type],\
				dst->b[b].mark._f, src->b[b].mark._f);	\
		dst->b[b]._mark._f = src->b[b].mark._f;			\
		set_bit(BCH_FS_NEED_ALLOC_WRITE, &c->flags);		\
	}
#define copy_dev_field(_f, _msg, ...)					\
	copy_field(_f, "dev %u has wrong " _msg, i, ##__VA_ARGS__)
#define copy_fs_field(_f, _msg, ...)					\
	copy_field(_f, "fs has wrong " _msg, ##__VA_ARGS__)

	{
		/* stripes: src = GC copy [1], dst = live copy [0] */
		struct genradix_iter iter = genradix_iter_init(&c->stripes[1], 0);
		struct stripe *dst, *src;

		while ((src = genradix_iter_peek(&iter, &c->stripes[1]))) {
			/* NOTE(review): genradix_ptr_alloc may return NULL on
			 * alloc failure — dst is dereferenced unchecked below;
			 * confirm whether this can fail here. */
			dst = genradix_ptr_alloc(&c->stripes[0], iter.pos, GFP_KERNEL);

			if (dst->alive != src->alive ||
			    dst->sectors != src->sectors ||
			    dst->algorithm != src->algorithm ||
			    dst->nr_blocks != src->nr_blocks ||
			    dst->nr_redundant != src->nr_redundant) {
				bch_err(c, "unexpected stripe inconsistency at bch2_gc_done, confused");
				ret = -EINVAL;
				goto fsck_err;
			}

			for (i = 0; i < ARRAY_SIZE(dst->block_sectors); i++)
				copy_stripe_field(block_sectors[i],
						  "block_sectors[%u]", i);

			/* derived field — recompute rather than compare: */
			dst->blocks_nonempty = 0;
			for (i = 0; i < dst->nr_blocks; i++)
				dst->blocks_nonempty += dst->block_sectors[i] != 0;

			genradix_iter_advance(&iter, &c->stripes[1]);
		}
	}

	/* fold the per-cpu accumulators into the base counters first: */
	for (i = 0; i < ARRAY_SIZE(c->usage); i++)
		bch2_fs_usage_acc_to_base(c, i);

	for_each_member_device(ca, c, dev) {
		struct bucket_array *dst = __bucket_array(ca, 0);
		struct bucket_array *src = __bucket_array(ca, 1);
		size_t b;

		for (b = 0; b < src->nbuckets; b++) {
			copy_bucket_field(gen);
			copy_bucket_field(data_type);
			copy_bucket_field(owned_by_allocator);
			copy_bucket_field(stripe);
			copy_bucket_field(dirty_sectors);
			copy_bucket_field(cached_sectors);

			/* oldest_gen is GC-owned; just take GC's value */
			dst->b[b].oldest_gen = src->b[b].oldest_gen;
		}

		{
			struct bch_dev_usage *dst = ca->usage_base;
			struct bch_dev_usage *src = (void *)
				bch2_acc_percpu_u64s((void *) ca->usage_gc,
						     dev_usage_u64s());

			copy_dev_field(buckets_ec,		"buckets_ec");
			copy_dev_field(buckets_unavailable,	"buckets_unavailable");

			for (i = 0; i < BCH_DATA_NR; i++) {
				copy_dev_field(d[i].buckets,	"%s buckets", bch2_data_types[i]);
				copy_dev_field(d[i].sectors,	"%s sectors", bch2_data_types[i]);
				copy_dev_field(d[i].fragmented,	"%s fragmented", bch2_data_types[i]);
			}
		}
	};

	{
		unsigned nr = fs_usage_u64s(c);
		struct bch_fs_usage *dst = c->usage_base;
		struct bch_fs_usage *src = (void *)
			bch2_acc_percpu_u64s((void *) c->usage_gc, nr);

		copy_fs_field(hidden,		"hidden");
		copy_fs_field(btree,		"btree");
		copy_fs_field(data,	"data");
		copy_fs_field(cached,	"cached");
		copy_fs_field(reserved,	"reserved");
		copy_fs_field(nr_inodes,"nr_inodes");

		for (i = 0; i < BCH_REPLICAS_MAX; i++)
			copy_fs_field(persistent_reserved[i],
				      "persistent_reserved[%i]", i);

		for (i = 0; i < c->replicas.nr; i++) {
			struct bch_replicas_entry *e =
				cpu_replicas_entry(&c->replicas, i);
			char buf[80];

			bch2_replicas_entry_to_text(&PBUF(buf), e);

			copy_fs_field(replicas[i], "%s", buf);
		}
	}

#undef copy_fs_field
#undef copy_dev_field
#undef copy_bucket_field
#undef copy_stripe_field
#undef copy_field
fsck_err:
	if (ret)
		bch_err(c, "%s: ret %i", __func__, ret);
	return ret;
}
906 | ||
079663d8 | 907 | static int bch2_gc_start(struct bch_fs *c) |
9ca53b55 KO |
908 | { |
909 | struct bch_dev *ca; | |
910 | unsigned i; | |
0741d378 | 911 | int ret; |
dfe9bfb3 | 912 | |
5e82a9a1 | 913 | BUG_ON(c->usage_gc); |
9ca53b55 | 914 | |
5e82a9a1 | 915 | c->usage_gc = __alloc_percpu_gfp(fs_usage_u64s(c) * sizeof(u64), |
ecf37a4a | 916 | sizeof(u64), GFP_KERNEL); |
1e1a31c4 KO |
917 | if (!c->usage_gc) { |
918 | bch_err(c, "error allocating c->usage_gc"); | |
9ca53b55 | 919 | return -ENOMEM; |
1e1a31c4 | 920 | } |
9ca53b55 | 921 | |
1c6fdbd8 | 922 | for_each_member_device(ca, c, i) { |
9ca53b55 | 923 | BUG_ON(ca->buckets[1]); |
180fb49d | 924 | BUG_ON(ca->usage_gc); |
9ca53b55 KO |
925 | |
926 | ca->buckets[1] = kvpmalloc(sizeof(struct bucket_array) + | |
927 | ca->mi.nbuckets * sizeof(struct bucket), | |
928 | GFP_KERNEL|__GFP_ZERO); | |
929 | if (!ca->buckets[1]) { | |
930 | percpu_ref_put(&ca->ref); | |
1e1a31c4 | 931 | bch_err(c, "error allocating ca->buckets[gc]"); |
9ca53b55 KO |
932 | return -ENOMEM; |
933 | } | |
934 | ||
180fb49d KO |
935 | ca->usage_gc = alloc_percpu(struct bch_dev_usage); |
936 | if (!ca->usage_gc) { | |
937 | bch_err(c, "error allocating ca->usage_gc"); | |
9ca53b55 KO |
938 | percpu_ref_put(&ca->ref); |
939 | return -ENOMEM; | |
1c6fdbd8 | 940 | } |
1c6fdbd8 | 941 | } |
9ca53b55 | 942 | |
0741d378 | 943 | ret = bch2_ec_mem_alloc(c, true); |
1e1a31c4 KO |
944 | if (ret) { |
945 | bch_err(c, "error allocating ec gc mem"); | |
0741d378 | 946 | return ret; |
1e1a31c4 | 947 | } |
0741d378 KO |
948 | |
949 | percpu_down_write(&c->mark_lock); | |
950 | ||
951 | /* | |
952 | * indicate to stripe code that we need to allocate for the gc stripes | |
953 | * radix tree, too | |
954 | */ | |
955 | gc_pos_set(c, gc_phase(GC_PHASE_START)); | |
956 | ||
9ca53b55 KO |
957 | for_each_member_device(ca, c, i) { |
958 | struct bucket_array *dst = __bucket_array(ca, 1); | |
959 | struct bucket_array *src = __bucket_array(ca, 0); | |
960 | size_t b; | |
961 | ||
962 | dst->first_bucket = src->first_bucket; | |
963 | dst->nbuckets = src->nbuckets; | |
964 | ||
28062d32 | 965 | for (b = 0; b < src->nbuckets; b++) { |
a1d58243 KO |
966 | struct bucket *d = &dst->b[b]; |
967 | struct bucket *s = &src->b[b]; | |
968 | ||
969 | d->_mark.gen = dst->b[b].oldest_gen = s->mark.gen; | |
970 | d->gen_valid = s->gen_valid; | |
28062d32 | 971 | } |
9ca53b55 KO |
972 | }; |
973 | ||
0741d378 KO |
974 | percpu_up_write(&c->mark_lock); |
975 | ||
976 | return 0; | |
1c6fdbd8 KO |
977 | } |
978 | ||
/**
 * bch2_gc - walk _all_ references to buckets, and recompute them:
 *
 * Order matters here:
 *  - Concurrent GC relies on the fact that we have a total ordering for
 *    everything that GC walks - see  gc_will_visit_node(),
 *    gc_will_visit_root()
 *
 *  - also, references move around in the course of index updates and
 *    various other crap: everything needs to agree on the ordering
 *    references are allowed to move around in - e.g., we're allowed to
 *    start with a reference owned by an open_bucket (the allocator) and
 *    move it to the btree, but not the reverse.
 *
 *    This is necessary to ensure that gc doesn't miss references that
 *    move around - if references move backwards in the ordering GC
 *    uses, GC could skip past them
 *
 * Returns 0 on success; on repeated failure to fix bucket gens, -EINVAL.
 */
int bch2_gc(struct bch_fs *c, bool initial)
{
	struct bch_dev *ca;
	u64 start_time = local_clock();
	unsigned i, iter = 0;	/* restart counter - see "goto again" below */
	int ret;

	lockdep_assert_held(&c->state_lock);
	trace_gc_start(c);

	down_write(&c->gc_lock);

	/* flush interior btree updates: */
	closure_wait_event(&c->btree_interior_update_wait,
			   !bch2_btree_interior_updates_nr_pending(c));
again:
	/* allocate the shadow (gc) copies of allocation info: */
	ret = bch2_gc_start(c);
	if (ret)
		goto out;

	bch2_mark_superblocks(c);

	ret = bch2_gc_btrees(c, initial);
	if (ret)
		goto out;

#if 0
	bch2_mark_pending_btree_node_frees(c);
#endif
	bch2_mark_allocator_buckets(c);

	c->gc_count++;

	/*
	 * If the mark path found inconsistencies (or the restart-gc debug
	 * knob is set on the first pass), throw away the gc state and redo
	 * the walk - but give up after a few attempts:
	 */
	if (test_bit(BCH_FS_NEED_ANOTHER_GC, &c->flags) ||
	    (!iter && bch2_test_restart_gc)) {
		/*
		 * XXX: make sure gens we fixed got saved
		 */
		if (iter++ <= 2) {
			bch_info(c, "Second GC pass needed, restarting:");
			clear_bit(BCH_FS_NEED_ANOTHER_GC, &c->flags);
			__gc_pos_set(c, gc_phase(GC_PHASE_NOT_RUNNING));

			percpu_down_write(&c->mark_lock);
			bch2_gc_free(c);
			percpu_up_write(&c->mark_lock);
			/* flush fsck errors, reset counters */
			bch2_flush_fsck_errs(c);

			goto again;
		}

		bch_info(c, "Unable to fix bucket gens, looping");
		ret = -EINVAL;
	}
out:
	if (!ret) {
		/*
		 * Block journal writes while comparing/copying gc state into
		 * the live state, so the two can't drift apart mid-copy:
		 */
		bch2_journal_block(&c->journal);

		percpu_down_write(&c->mark_lock);
		ret = bch2_gc_done(c, initial);

		bch2_journal_unblock(&c->journal);
	} else {
		percpu_down_write(&c->mark_lock);
	}

	/* Indicates that gc is no longer in progress: */
	__gc_pos_set(c, gc_phase(GC_PHASE_NOT_RUNNING));

	bch2_gc_free(c);
	percpu_up_write(&c->mark_lock);

	up_write(&c->gc_lock);

	trace_gc_end(c);
	bch2_time_stats_update(&c->times[BCH_TIME_btree_gc], start_time);

	/*
	 * Wake up allocator in case it was waiting for buckets
	 * because of not being able to inc gens
	 */
	for_each_member_device(ca, c, i)
		bch2_wake_allocator(ca);

	/*
	 * At startup, allocations can happen directly instead of via the
	 * allocator thread - issue wakeup in case they blocked on gc_lock:
	 */
	closure_wake_up(&c->freelist_wait);
	return ret;
}
1089 | ||
c47c50f8 KO |
1090 | static bool gc_btree_gens_key(struct bch_fs *c, struct bkey_s_c k) |
1091 | { | |
1092 | struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k); | |
1093 | const struct bch_extent_ptr *ptr; | |
1094 | ||
1095 | percpu_down_read(&c->mark_lock); | |
1096 | bkey_for_each_ptr(ptrs, ptr) { | |
1097 | struct bch_dev *ca = bch_dev_bkey_exists(c, ptr->dev); | |
1098 | struct bucket *g = PTR_BUCKET(ca, ptr, false); | |
1099 | ||
1100 | if (gen_after(g->mark.gen, ptr->gen) > 16) { | |
1101 | percpu_up_read(&c->mark_lock); | |
1102 | return true; | |
1103 | } | |
1104 | } | |
1105 | ||
1106 | bkey_for_each_ptr(ptrs, ptr) { | |
1107 | struct bch_dev *ca = bch_dev_bkey_exists(c, ptr->dev); | |
1108 | struct bucket *g = PTR_BUCKET(ca, ptr, false); | |
1109 | ||
1110 | if (gen_after(g->gc_gen, ptr->gen)) | |
1111 | g->gc_gen = ptr->gen; | |
1112 | } | |
1113 | percpu_up_read(&c->mark_lock); | |
1114 | ||
1115 | return false; | |
1116 | } | |
1117 | ||
451570a5 KO |
/*
 * For recalculating oldest gen, we only need to walk keys in leaf nodes; btree
 * node pointers currently never have cached pointers that can become stale:
 */
static int bch2_gc_btree_gens(struct bch_fs *c, enum btree_id btree_id)
{
	struct btree_trans trans;
	struct btree_iter *iter;
	struct bkey_s_c k;
	struct bkey_buf sk;	/* scratch copy of the key being rewritten */
	int ret = 0;

	bch2_bkey_buf_init(&sk);
	bch2_trans_init(&trans, c, 0, 0);

	iter = bch2_trans_get_iter(&trans, btree_id, POS_MIN,
				   BTREE_ITER_PREFETCH);

	/* Walk every key in the btree; stop on iterator error. */
	while ((k = bch2_btree_iter_peek(iter)).k &&
	       !(ret = bkey_err(k))) {
		if (gc_btree_gens_key(c, k)) {
			/*
			 * Key has pointers lagging far behind their bucket
			 * gens - rewrite it (normalized, dropping stale
			 * cached pointers) in place:
			 */
			bch2_bkey_buf_reassemble(&sk, c, k);
			bch2_extent_normalize(c, bkey_i_to_s(sk.k));

			bch2_btree_iter_set_pos(iter, bkey_start_pos(&sk.k->k));

			bch2_trans_update(&trans, iter, sk.k, 0);

			ret = bch2_trans_commit(&trans, NULL, NULL,
						BTREE_INSERT_NOFAIL);
			/* -EINTR: transaction restart - retry same position */
			if (ret == -EINTR)
				continue;
			if (ret) {
				break;
			}
		}

		bch2_btree_iter_next(iter);
	}

	bch2_trans_exit(&trans);
	bch2_bkey_buf_exit(&sk, c);

	return ret;
}
1163 | ||
1164 | int bch2_gc_gens(struct bch_fs *c) | |
1165 | { | |
1166 | struct bch_dev *ca; | |
1167 | struct bucket_array *buckets; | |
1168 | struct bucket *g; | |
1169 | unsigned i; | |
1170 | int ret; | |
1171 | ||
b9c3d139 KO |
1172 | /* |
1173 | * Ideally we would be using state_lock and not gc_lock here, but that | |
1174 | * introduces a deadlock in the RO path - we currently take the state | |
1175 | * lock at the start of going RO, thus the gc thread may get stuck: | |
1176 | */ | |
1177 | down_read(&c->gc_lock); | |
451570a5 KO |
1178 | |
1179 | for_each_member_device(ca, c, i) { | |
1180 | down_read(&ca->bucket_lock); | |
1181 | buckets = bucket_array(ca); | |
1182 | ||
1183 | for_each_bucket(g, buckets) | |
1184 | g->gc_gen = g->mark.gen; | |
1185 | up_read(&ca->bucket_lock); | |
1186 | } | |
1187 | ||
1188 | for (i = 0; i < BTREE_ID_NR; i++) | |
1189 | if (btree_node_type_needs_gc(i)) { | |
1190 | ret = bch2_gc_btree_gens(c, i); | |
74ed7e56 KO |
1191 | if (ret) { |
1192 | bch_err(c, "error recalculating oldest_gen: %i", ret); | |
451570a5 | 1193 | goto err; |
74ed7e56 | 1194 | } |
451570a5 KO |
1195 | } |
1196 | ||
1197 | for_each_member_device(ca, c, i) { | |
1198 | down_read(&ca->bucket_lock); | |
1199 | buckets = bucket_array(ca); | |
1200 | ||
1201 | for_each_bucket(g, buckets) | |
1202 | g->oldest_gen = g->gc_gen; | |
1203 | up_read(&ca->bucket_lock); | |
1204 | } | |
74ed7e56 KO |
1205 | |
1206 | c->gc_count++; | |
451570a5 | 1207 | err: |
b9c3d139 | 1208 | up_read(&c->gc_lock); |
451570a5 KO |
1209 | return ret; |
1210 | } | |
1211 | ||
1c6fdbd8 KO |
1212 | /* Btree coalescing */ |
1213 | ||
1214 | static void recalc_packed_keys(struct btree *b) | |
1215 | { | |
216c9fac | 1216 | struct bset *i = btree_bset_first(b); |
1c6fdbd8 KO |
1217 | struct bkey_packed *k; |
1218 | ||
1219 | memset(&b->nr, 0, sizeof(b->nr)); | |
1220 | ||
1221 | BUG_ON(b->nsets != 1); | |
1222 | ||
216c9fac | 1223 | vstruct_for_each(i, k) |
1c6fdbd8 KO |
1224 | btree_keys_account_key_add(&b->nr, 0, k); |
1225 | } | |
1226 | ||
/*
 * bch2_coalesce_nodes - attempt to merge a window of adjacent sibling nodes
 * into one fewer node.
 *
 * @iter points at old_nodes[0]; old_nodes[] holds up to GC_MERGE_NODES
 * intent-locked siblings (NULL-terminated).  Bails out early (without error)
 * if the keys can't fit in fewer nodes, no common format fits, or a
 * reserve/keylist allocation fails.  On success the new nodes are written,
 * the parent updated, and the caller's sliding window is reset so only the
 * first new node remains in it.
 */
static void bch2_coalesce_nodes(struct bch_fs *c, struct btree_iter *iter,
				struct btree *old_nodes[GC_MERGE_NODES])
{
	struct btree *parent = btree_node_parent(iter, old_nodes[0]);
	unsigned i, nr_old_nodes, nr_new_nodes, u64s = 0;
	/* target fill: merge only if result stays under 2/3 of a node */
	unsigned blocks = btree_blocks(c) * 2 / 3;
	struct btree *new_nodes[GC_MERGE_NODES];
	struct btree_update *as;
	struct keylist keylist;
	struct bkey_format_state format_state;
	struct bkey_format new_format;

	memset(new_nodes, 0, sizeof(new_nodes));
	bch2_keylist_init(&keylist, NULL);

	/* Count keys that are not deleted */
	for (i = 0; i < GC_MERGE_NODES && old_nodes[i]; i++)
		u64s += old_nodes[i]->nr.live_u64s;

	nr_old_nodes = nr_new_nodes = i;

	/* Check if all keys in @old_nodes could fit in one fewer node */
	if (nr_old_nodes <= 1 ||
	    __vstruct_blocks(struct btree_node, c->block_bits,
			     DIV_ROUND_UP(u64s, nr_old_nodes - 1)) > blocks)
		return;

	/* Find a format that all keys in @old_nodes can pack into */
	bch2_bkey_format_init(&format_state);

	for (i = 0; i < nr_old_nodes; i++)
		__bch2_btree_calc_format(&format_state, old_nodes[i]);

	new_format = bch2_bkey_format_done(&format_state);

	/* Check if repacking would make any nodes too big to fit */
	for (i = 0; i < nr_old_nodes; i++)
		if (!bch2_btree_node_format_fits(c, old_nodes[i], &new_format)) {
			trace_btree_gc_coalesce_fail(c,
					BTREE_GC_COALESCE_FAIL_FORMAT_FITS);
			return;
		}

	if (bch2_keylist_realloc(&keylist, NULL, 0,
			BKEY_BTREE_PTR_U64s_MAX * nr_old_nodes)) {
		trace_btree_gc_coalesce_fail(c,
				BTREE_GC_COALESCE_FAIL_KEYLIST_REALLOC);
		return;
	}

	as = bch2_btree_update_start(iter->trans, iter->btree_id,
			btree_update_reserve_required(c, parent) + nr_old_nodes,
			BTREE_INSERT_NOFAIL|
			BTREE_INSERT_USE_RESERVE,
			NULL);
	if (IS_ERR(as)) {
		trace_btree_gc_coalesce_fail(c,
				BTREE_GC_COALESCE_FAIL_RESERVE_GET);
		bch2_keylist_free(&keylist, NULL);
		return;
	}

	trace_btree_gc_coalesce(c, old_nodes[0]);

	for (i = 0; i < nr_old_nodes; i++)
		bch2_btree_interior_update_will_free_node(as, old_nodes[i]);

	/* Repack everything with @new_format and sort down to one bset */
	for (i = 0; i < nr_old_nodes; i++)
		new_nodes[i] =
			__bch2_btree_node_alloc_replacement(as, old_nodes[i],
							    new_format);

	/*
	 * Conceptually we concatenate the nodes together and slice them
	 * up at different boundaries.  Working right-to-left, pull keys
	 * from each node into its right neighbour until it's full:
	 */
	for (i = nr_new_nodes - 1; i > 0; --i) {
		struct btree *n1 = new_nodes[i];
		struct btree *n2 = new_nodes[i - 1];

		struct bset *s1 = btree_bset_first(n1);
		struct bset *s2 = btree_bset_first(n2);
		struct bkey_packed *k, *last = NULL;

		/* Calculate how many keys from @n2 we could fit inside @n1 */
		u64s = 0;

		for (k = s2->start;
		     k < vstruct_last(s2) &&
		     vstruct_blocks_plus(n1->data, c->block_bits,
					 u64s + k->u64s) <= blocks;
		     k = bkey_next_skip_noops(k, vstruct_last(s2))) {
			last = k;
			u64s += k->u64s;
		}

		if (u64s == le16_to_cpu(s2->u64s)) {
			/* n2 fits entirely in n1 */
			n1->key.k.p = n1->data->max_key = n2->data->max_key;

			memcpy_u64s(vstruct_last(s1),
				    s2->start,
				    le16_to_cpu(s2->u64s));
			le16_add_cpu(&s1->u64s, le16_to_cpu(s2->u64s));

			set_btree_bset_end(n1, n1->set);

			/* n2 is now empty - free it and drop it from the array */
			six_unlock_write(&n2->c.lock);
			bch2_btree_node_free_never_inserted(c, n2);
			six_unlock_intent(&n2->c.lock);

			memmove(new_nodes + i - 1,
				new_nodes + i,
				sizeof(new_nodes[0]) * (nr_new_nodes - i));
			new_nodes[--nr_new_nodes] = NULL;
		} else if (u64s) {
			/* move part of n2 into n1 */
			n1->key.k.p = n1->data->max_key =
				bkey_unpack_pos(n1, last);

			n2->data->min_key = bkey_successor(n1->data->max_key);

			memcpy_u64s(vstruct_last(s1),
				    s2->start, u64s);
			le16_add_cpu(&s1->u64s, u64s);

			/* shift the remaining keys of n2 down to its start: */
			memmove(s2->start,
				vstruct_idx(s2, u64s),
				(le16_to_cpu(s2->u64s) - u64s) * sizeof(u64));
			s2->u64s = cpu_to_le16(le16_to_cpu(s2->u64s) - u64s);

			set_btree_bset_end(n1, n1->set);
			set_btree_bset_end(n2, n2->set);
		}
	}

	for (i = 0; i < nr_new_nodes; i++) {
		struct btree *n = new_nodes[i];

		recalc_packed_keys(n);
		btree_node_reset_sib_u64s(n);

		bch2_btree_build_aux_trees(n);

		bch2_btree_update_add_new_node(as, n);
		six_unlock_write(&n->c.lock);

		bch2_btree_node_write(c, n, SIX_LOCK_intent);
	}

	/*
	 * The keys for the old nodes get deleted. We don't want to insert keys
	 * that compare equal to the keys for the new nodes we'll also be
	 * inserting - we can't because keys on a keylist must be strictly
	 * greater than the previous keys, and we also don't need to since the
	 * key for the new node will serve the same purpose (overwriting the key
	 * for the old node).
	 */
	for (i = 0; i < nr_old_nodes; i++) {
		struct bkey_i delete;
		unsigned j;

		for (j = 0; j < nr_new_nodes; j++)
			if (!bkey_cmp(old_nodes[i]->key.k.p,
				      new_nodes[j]->key.k.p))
				goto next;

		bkey_init(&delete.k);
		delete.k.p = old_nodes[i]->key.k.p;
		bch2_keylist_add_in_order(&keylist, &delete);
next:
		i = i;	/* no-op statement: a label must precede a statement */
	}

	/*
	 * Keys for the new nodes get inserted: bch2_btree_insert_keys() only
	 * does the lookup once and thus expects the keys to be in sorted order
	 * so we have to make sure the new keys are correctly ordered with
	 * respect to the deleted keys added in the previous loop
	 */
	for (i = 0; i < nr_new_nodes; i++)
		bch2_keylist_add_in_order(&keylist, &new_nodes[i]->key);

	/* Insert the newly coalesced nodes */
	bch2_btree_insert_node(as, parent, iter, &keylist, 0);

	BUG_ON(!bch2_keylist_empty(&keylist));

	BUG_ON(iter->l[old_nodes[0]->c.level].b != old_nodes[0]);

	bch2_btree_iter_node_replace(iter, new_nodes[0]);

	for (i = 0; i < nr_new_nodes; i++)
		bch2_btree_update_get_open_buckets(as, new_nodes[i]);

	/* Free the old nodes and update our sliding window */
	for (i = 0; i < nr_old_nodes; i++) {
		bch2_btree_node_free_inmem(c, old_nodes[i], iter);

		/*
		 * the index update might have triggered a split, in which case
		 * the nodes we coalesced - the new nodes we just created -
		 * might not be sibling nodes anymore - don't add them to the
		 * sliding window (except the first):
		 */
		if (!i) {
			old_nodes[i] = new_nodes[i];
		} else {
			old_nodes[i] = NULL;
		}
	}

	for (i = 0; i < nr_new_nodes; i++)
		six_unlock_intent(&new_nodes[i]->c.lock);

	bch2_btree_update_done(as);
	bch2_keylist_free(&keylist, NULL);
}
1446 | ||
/*
 * bch2_coalesce_btree - walk one btree, sliding a GC_MERGE_NODES-wide window
 * of adjacent nodes and attempting to coalesce each window.
 *
 * Returns 0, -ESHUTDOWN if the hosting kthread was asked to stop, or a
 * btree-iteration error from bch2_trans_exit().
 */
static int bch2_coalesce_btree(struct bch_fs *c, enum btree_id btree_id)
{
	struct btree_trans trans;
	struct btree_iter *iter;
	struct btree *b;
	bool kthread = (current->flags & PF_KTHREAD) != 0;
	unsigned i;

	/* Sliding window of adjacent btree nodes */
	struct btree *merge[GC_MERGE_NODES];
	/* lock sequence numbers, used to re-take dropped intent locks: */
	u32 lock_seq[GC_MERGE_NODES];

	bch2_trans_init(&trans, c, 0, 0);

	/*
	 * XXX: We don't have a good way of positively matching on sibling nodes
	 * that have the same parent - this code works by handling the cases
	 * where they might not have the same parent, and is thus fragile. Ugh.
	 *
	 * Perhaps redo this to use multiple linked iterators?
	 */
	memset(merge, 0, sizeof(merge));

	__for_each_btree_node(&trans, iter, btree_id, POS_MIN,
			      BTREE_MAX_DEPTH, 0,
			      BTREE_ITER_PREFETCH, b) {
		/* shift the window right; newest node goes in slot 0: */
		memmove(merge + 1, merge,
			sizeof(merge) - sizeof(merge[0]));
		memmove(lock_seq + 1, lock_seq,
			sizeof(lock_seq) - sizeof(lock_seq[0]));

		merge[0] = b;

		/* re-take intent locks on the older window entries: */
		for (i = 1; i < GC_MERGE_NODES; i++) {
			if (!merge[i] ||
			    !six_relock_intent(&merge[i]->c.lock, lock_seq[i]))
				break;

			if (merge[i]->c.level != merge[0]->c.level) {
				six_unlock_intent(&merge[i]->c.lock);
				break;
			}
		}
		/* anything we couldn't relock drops out of the window: */
		memset(merge + i, 0, (GC_MERGE_NODES - i) * sizeof(merge[0]));

		bch2_coalesce_nodes(c, iter, merge);

		/* drop the extra locks, remembering seqs for relocking: */
		for (i = 1; i < GC_MERGE_NODES && merge[i]; i++) {
			lock_seq[i] = merge[i]->c.lock.state.seq;
			six_unlock_intent(&merge[i]->c.lock);
		}

		lock_seq[0] = merge[0]->c.lock.state.seq;

		if (kthread && kthread_should_stop()) {
			bch2_trans_exit(&trans);
			return -ESHUTDOWN;
		}

		bch2_trans_cond_resched(&trans);

		/*
		 * If the parent node wasn't relocked, it might have been split
		 * and the nodes in our sliding window might not have the same
		 * parent anymore - blow away the sliding window:
		 */
		if (btree_iter_node(iter, iter->level + 1) &&
		    !btree_node_intent_locked(iter, iter->level + 1))
			memset(merge + 1, 0,
			       (GC_MERGE_NODES - 1) * sizeof(merge[0]));
	}
	return bch2_trans_exit(&trans);
}
1520 | ||
1521 | /** | |
1522 | * bch_coalesce - coalesce adjacent nodes with low occupancy | |
1523 | */ | |
1524 | void bch2_coalesce(struct bch_fs *c) | |
1525 | { | |
1526 | enum btree_id id; | |
1527 | ||
1c6fdbd8 KO |
1528 | down_read(&c->gc_lock); |
1529 | trace_gc_coalesce_start(c); | |
1530 | ||
1531 | for (id = 0; id < BTREE_ID_NR; id++) { | |
1532 | int ret = c->btree_roots[id].b | |
1533 | ? bch2_coalesce_btree(c, id) | |
1534 | : 0; | |
1535 | ||
1536 | if (ret) { | |
1537 | if (ret != -ESHUTDOWN) | |
1538 | bch_err(c, "btree coalescing failed: %d", ret); | |
1c6fdbd8 KO |
1539 | return; |
1540 | } | |
1541 | } | |
1542 | ||
1543 | trace_gc_coalesce_end(c); | |
1544 | up_read(&c->gc_lock); | |
1545 | } | |
1546 | ||
/*
 * bch2_gc_thread - freezable kthread main loop for periodic/kicked GC.
 *
 * Sleeps until either kick_gc is bumped or (when btree_gc_periodic is set)
 * the write io_clock advances by capacity/16 sectors, then runs a gens
 * pass.  Returns 0 when stopped via kthread_stop().
 */
static int bch2_gc_thread(void *arg)
{
	struct bch_fs *c = arg;
	struct io_clock *clock = &c->io_clock[WRITE];
	unsigned long last = atomic64_read(&clock->now);
	unsigned last_kick = atomic_read(&c->kick_gc);
	int ret;

	set_freezable();

	while (1) {
		/* inner loop: sleep until there's work (or we're stopped) */
		while (1) {
			set_current_state(TASK_INTERRUPTIBLE);

			if (kthread_should_stop()) {
				__set_current_state(TASK_RUNNING);
				return 0;
			}

			if (atomic_read(&c->kick_gc) != last_kick)
				break;

			if (c->btree_gc_periodic) {
				unsigned long next = last + c->capacity / 16;

				if (atomic64_read(&clock->now) >= next)
					break;

				bch2_io_clock_schedule_timeout(clock, next);
			} else {
				schedule();
			}

			try_to_freeze();
		}
		__set_current_state(TASK_RUNNING);

		last = atomic64_read(&clock->now);
		last_kick = atomic_read(&c->kick_gc);

		/*
		 * Full gc is currently incompatible with btree key cache:
		 */
#if 0
		/*
		 * NOTE(review): this dead call passes three args but
		 * bch2_gc() above takes (c, initial) - update before
		 * re-enabling this path.
		 */
		ret = bch2_gc(c, false, false);
#else
		ret = bch2_gc_gens(c);
#endif
		if (ret < 0)
			bch_err(c, "btree gc failed: %i", ret);

		debug_check_no_locks_held();
	}

	return 0;
}
1603 | ||
1604 | void bch2_gc_thread_stop(struct bch_fs *c) | |
1605 | { | |
1606 | struct task_struct *p; | |
1607 | ||
1608 | p = c->gc_thread; | |
1609 | c->gc_thread = NULL; | |
1610 | ||
1611 | if (p) { | |
1612 | kthread_stop(p); | |
1613 | put_task_struct(p); | |
1614 | } | |
1615 | } | |
1616 | ||
1617 | int bch2_gc_thread_start(struct bch_fs *c) | |
1618 | { | |
1619 | struct task_struct *p; | |
1620 | ||
a4805d66 KO |
1621 | if (c->gc_thread) |
1622 | return 0; | |
1c6fdbd8 | 1623 | |
b7a9bbfc | 1624 | p = kthread_create(bch2_gc_thread, c, "bch-gc/%s", c->name); |
dab9ef0d KO |
1625 | if (IS_ERR(p)) { |
1626 | bch_err(c, "error creating gc thread: %li", PTR_ERR(p)); | |
1c6fdbd8 | 1627 | return PTR_ERR(p); |
dab9ef0d | 1628 | } |
1c6fdbd8 KO |
1629 | |
1630 | get_task_struct(p); | |
1631 | c->gc_thread = p; | |
1632 | wake_up_process(p); | |
1633 | return 0; | |
1634 | } |