// SPDX-License-Identifier: GPL-2.0
#include "bcachefs.h"
#include "alloc_background.h"
#include "alloc_foreground.h"
#include "btree_cache.h"
#include "btree_io.h"
#include "btree_update.h"
#include "btree_update_interior.h"
#include "btree_gc.h"
#include "buckets.h"
#include "clock.h"
#include "debug.h"
#include "ec.h"
#include "error.h"
#include "journal_io.h"
#include "trace.h"

#include <linux/kthread.h>
#include <linux/math64.h>
#include <linux/random.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>
#include <linux/sched/task.h>
#include <linux/sort.h>

static const char * const bch2_alloc_field_names[] = {
#define x(name, bytes) #name,
	BCH_ALLOC_FIELDS()
#undef x
	NULL
};

static void bch2_recalc_oldest_io(struct bch_fs *, struct bch_dev *, int);

/* Ratelimiting/PD controllers */
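/*
 * Periodic work that, for each member device, recomputes how much space is
 * free and how many bytes are internally fragmented (cached/user data that
 * copygc could reclaim), and feeds those numbers to the copygc PD controller.
 */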
static void pd_controllers_update(struct work_struct *work)
{
	struct bch_fs *c = container_of(to_delayed_work(work),
					struct bch_fs,
					pd_controllers_update);
	struct bch_dev *ca;
	unsigned i;

	for_each_member_device(ca, c, i) {
		struct bch_dev_usage stats = bch2_dev_usage_read(c, ca);

		u64 free = bucket_to_sector(ca,
				__dev_buckets_free(ca, stats)) << 9;
		/*
		 * Bytes of internal fragmentation, which can be
		 * reclaimed by copy GC
		 */
		s64 fragmented = (bucket_to_sector(ca,
					stats.buckets[BCH_DATA_USER] +
					stats.buckets[BCH_DATA_CACHED]) -
				  (stats.sectors[BCH_DATA_USER] +
				   stats.sectors[BCH_DATA_CACHED])) << 9;

		fragmented = max(0LL, fragmented);

		bch2_pd_controller_update(&ca->copygc_pd,
					  free, fragmented, -1);
	}

	schedule_delayed_work(&c->pd_controllers_update,
			      c->pd_controllers_update_seconds * HZ);
}

/* Persistent alloc info: */
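/*
 * struct bch_alloc stores its fields in a variable-width encoding: the
 * ->fields bitmap records which fields are present, and each present field is
 * packed little-endian with the width given by BCH_ALLOC_FIELD_BYTES.
 * get_alloc_field()/put_alloc_field() walk that encoding one field at a time,
 * advancing *p past each field they read or write.
 */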
static inline u64 get_alloc_field(const struct bch_alloc *a,
				  const void **p, unsigned field)
{
	unsigned bytes = BCH_ALLOC_FIELD_BYTES[field];
	u64 v;

	if (!(a->fields & (1 << field)))
		return 0;

	switch (bytes) {
	case 1:
		v = *((const u8 *) *p);
		break;
	case 2:
		v = le16_to_cpup(*p);
		break;
	case 4:
		v = le32_to_cpup(*p);
		break;
	case 8:
		v = le64_to_cpup(*p);
		break;
	default:
		BUG();
	}

	*p += bytes;
	return v;
}

static inline void put_alloc_field(struct bkey_i_alloc *a, void **p,
				   unsigned field, u64 v)
{
	unsigned bytes = BCH_ALLOC_FIELD_BYTES[field];

	if (!v)
		return;

	a->v.fields |= 1 << field;

	switch (bytes) {
	case 1:
		*((u8 *) *p) = v;
		break;
	case 2:
		*((__le16 *) *p) = cpu_to_le16(v);
		break;
	case 4:
		*((__le32 *) *p) = cpu_to_le32(v);
		break;
	case 8:
		*((__le64 *) *p) = cpu_to_le64(v);
		break;
	default:
		BUG();
	}

	*p += bytes;
}

struct bkey_alloc_unpacked bch2_alloc_unpack(const struct bch_alloc *a)
{
	struct bkey_alloc_unpacked ret = { .gen = a->gen };
	const void *d = a->data;
	unsigned idx = 0;

#define x(_name, _bits)	ret._name = get_alloc_field(a, &d, idx++);
	BCH_ALLOC_FIELDS()
#undef x
	return ret;
}

static void bch2_alloc_pack(struct bkey_i_alloc *dst,
			    const struct bkey_alloc_unpacked src)
{
	unsigned idx = 0;
	void *d = dst->v.data;

	dst->v.fields = 0;
	dst->v.gen = src.gen;

#define x(_name, _bits)	put_alloc_field(dst, &d, idx++, src._name);
	BCH_ALLOC_FIELDS()
#undef x

	set_bkey_val_bytes(&dst->k, (void *) d - (void *) &dst->v);
}

static unsigned bch_alloc_val_u64s(const struct bch_alloc *a)
{
	unsigned i, bytes = offsetof(struct bch_alloc, data);

	for (i = 0; i < ARRAY_SIZE(BCH_ALLOC_FIELD_BYTES); i++)
		if (a->fields & (1 << i))
			bytes += BCH_ALLOC_FIELD_BYTES[i];

	return DIV_ROUND_UP(bytes, sizeof(u64));
}

const char *bch2_alloc_invalid(const struct bch_fs *c, struct bkey_s_c k)
{
	struct bkey_s_c_alloc a = bkey_s_c_to_alloc(k);

	if (k.k->p.inode >= c->sb.nr_devices ||
	    !c->devs[k.k->p.inode])
		return "invalid device";

	/* allow for unknown fields */
	if (bkey_val_u64s(a.k) < bch_alloc_val_u64s(a.v))
		return "incorrect value size";

	return NULL;
}

void bch2_alloc_to_text(struct printbuf *out, struct bch_fs *c,
			struct bkey_s_c k)
{
	struct bkey_s_c_alloc a = bkey_s_c_to_alloc(k);
	const void *d = a.v->data;
	unsigned i;

	pr_buf(out, "gen %u", a.v->gen);

	for (i = 0; i < BCH_ALLOC_FIELD_NR; i++)
		if (a.v->fields & (1 << i))
			pr_buf(out, " %s %llu",
			       bch2_alloc_field_names[i],
			       get_alloc_field(a.v, &d, i));
}
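/*
 * Read one alloc key into the in-memory bucket: copies the IO clocks and
 * oldest_gen, then cmpxchgs the packed bucket_mark so the gen and sector
 * counts from disk become the current in-memory state.
 */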
static void __alloc_read_key(struct bucket *g, const struct bch_alloc *a)
{
	const void *d = a->data;
	unsigned idx = 0, data_type, dirty_sectors, cached_sectors;
	struct bucket_mark m;

	g->io_time[READ] = get_alloc_field(a, &d, idx++);
	g->io_time[WRITE] = get_alloc_field(a, &d, idx++);
	data_type = get_alloc_field(a, &d, idx++);
	dirty_sectors = get_alloc_field(a, &d, idx++);
	cached_sectors = get_alloc_field(a, &d, idx++);
	g->oldest_gen = get_alloc_field(a, &d, idx++);

	bucket_cmpxchg(g, m, ({
		m.gen = a->gen;
		m.data_type = data_type;
		m.dirty_sectors = dirty_sectors;
		m.cached_sectors = cached_sectors;
	}));

	g->gen_valid = 1;
}

static void __alloc_write_key(struct bkey_i_alloc *a, struct bucket *g,
			      struct bucket_mark m)
{
	unsigned idx = 0;
	void *d = a->v.data;

	a->v.fields = 0;
	a->v.gen = m.gen;

	d = a->v.data;
	put_alloc_field(a, &d, idx++, g->io_time[READ]);
	put_alloc_field(a, &d, idx++, g->io_time[WRITE]);
	put_alloc_field(a, &d, idx++, m.data_type);
	put_alloc_field(a, &d, idx++, m.dirty_sectors);
	put_alloc_field(a, &d, idx++, m.cached_sectors);
	put_alloc_field(a, &d, idx++, g->oldest_gen);

	set_bkey_val_bytes(&a->k, (void *) d - (void *) &a->v);
}

static void bch2_alloc_read_key(struct bch_fs *c, struct bkey_s_c k)
{
	struct bch_dev *ca;
	struct bkey_s_c_alloc a;

	if (k.k->type != KEY_TYPE_alloc)
		return;

	a = bkey_s_c_to_alloc(k);
	ca = bch_dev_bkey_exists(c, a.k->p.inode);

	if (a.k->p.offset >= ca->mi.nbuckets)
		return;

	percpu_down_read(&c->mark_lock);
	__alloc_read_key(bucket(ca, a.k->p.offset), a.v);
	percpu_up_read(&c->mark_lock);
}

int bch2_alloc_read(struct bch_fs *c, struct list_head *journal_replay_list)
{
	struct journal_replay *r;
	struct btree_iter iter;
	struct bkey_s_c k;
	struct bch_dev *ca;
	unsigned i;
	int ret;

	for_each_btree_key(&iter, c, BTREE_ID_ALLOC, POS_MIN, 0, k) {
		bch2_alloc_read_key(c, k);
		bch2_btree_iter_cond_resched(&iter);
	}

	ret = bch2_btree_iter_unlock(&iter);
	if (ret)
		return ret;

	list_for_each_entry(r, journal_replay_list, list) {
		struct bkey_i *k, *n;
		struct jset_entry *entry;

		for_each_jset_key(k, n, entry, &r->j)
			if (entry->btree_id == BTREE_ID_ALLOC)
				bch2_alloc_read_key(c, bkey_i_to_s_c(k));
	}

	for_each_member_device(ca, c, i)
		bch2_dev_usage_from_buckets(c, ca);

	mutex_lock(&c->bucket_clock[READ].lock);
	for_each_member_device(ca, c, i) {
		down_read(&ca->bucket_lock);
		bch2_recalc_oldest_io(c, ca, READ);
		up_read(&ca->bucket_lock);
	}
	mutex_unlock(&c->bucket_clock[READ].lock);

	mutex_lock(&c->bucket_clock[WRITE].lock);
	for_each_member_device(ca, c, i) {
		down_read(&ca->bucket_lock);
		bch2_recalc_oldest_io(c, ca, WRITE);
		up_read(&ca->bucket_lock);
	}
	mutex_unlock(&c->bucket_clock[WRITE].lock);

	return 0;
}
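/*
 * Replay a single alloc key from the journal: skipped if the device or bucket
 * no longer exists, or if the bucket was already rewritten by the allocator
 * (checked against ca->buckets_written with the btree node locked).
 */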
int bch2_alloc_replay_key(struct bch_fs *c, struct bkey_i *k)
{
	struct btree_trans trans;
	struct btree_iter *iter;
	struct bch_dev *ca;
	int ret;

	if (k->k.p.inode >= c->sb.nr_devices ||
	    !c->devs[k->k.p.inode])
		return 0;

	ca = bch_dev_bkey_exists(c, k->k.p.inode);

	if (k->k.p.offset >= ca->mi.nbuckets)
		return 0;

	bch2_trans_init(&trans, c);

	iter = bch2_trans_get_iter(&trans, BTREE_ID_ALLOC, k->k.p,
				   BTREE_ITER_INTENT);

	ret = bch2_btree_iter_traverse(iter);
	if (ret)
		goto err;

	/* check buckets_written with btree node locked: */
	if (test_bit(k->k.p.offset, ca->buckets_written)) {
		ret = 0;
		goto err;
	}

	bch2_trans_update(&trans, BTREE_INSERT_ENTRY(iter, k));

	ret = bch2_trans_commit(&trans, NULL, NULL,
				BTREE_INSERT_NOFAIL|
				BTREE_INSERT_LAZY_RW|
				BTREE_INSERT_JOURNAL_REPLAY|
				BTREE_INSERT_NOMARK);
err:
	bch2_trans_exit(&trans);
	return ret;
}
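/*
 * Write the alloc key for a single bucket back to the btree. The in-memory
 * bucket_mark is authoritative: buckets whose mark isn't dirty are skipped,
 * and on success the dirty bit is cleared and the bucket is recorded in
 * ca->buckets_written.
 */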
static int __bch2_alloc_write_key(struct btree_trans *trans, struct bch_dev *ca,
				  size_t b, struct btree_iter *iter,
				  u64 *journal_seq, unsigned flags)
{
	struct bch_fs *c = trans->c;
#if 0
	__BKEY_PADDED(k, BKEY_ALLOC_VAL_U64s_MAX) alloc_key;
#else
	/* hack: */
	__BKEY_PADDED(k, 8) alloc_key;
#endif
	struct bkey_i_alloc *a = bkey_alloc_init(&alloc_key.k);
	struct bucket *g;
	struct bucket_mark m, new;
	int ret;

	BUG_ON(BKEY_ALLOC_VAL_U64s_MAX > 8);

	a->k.p = POS(ca->dev_idx, b);

	bch2_btree_iter_set_pos(iter, a->k.p);

	ret = bch2_btree_iter_traverse(iter);
	if (ret)
		return ret;

	percpu_down_read(&c->mark_lock);
	g = bucket(ca, b);
	m = READ_ONCE(g->mark);

	if (!m.dirty) {
		percpu_up_read(&c->mark_lock);
		return 0;
	}

	__alloc_write_key(a, g, m);
	percpu_up_read(&c->mark_lock);

	bch2_btree_iter_cond_resched(iter);

	bch2_trans_update(trans, BTREE_INSERT_ENTRY(iter, &a->k_i));

	ret = bch2_trans_commit(trans, NULL, journal_seq,
				BTREE_INSERT_NOCHECK_RW|
				BTREE_INSERT_NOFAIL|
				BTREE_INSERT_USE_RESERVE|
				BTREE_INSERT_USE_ALLOC_RESERVE|
				BTREE_INSERT_NOMARK|
				flags);
	if (ret)
		return ret;

	new = m;
	new.dirty = false;
	atomic64_cmpxchg(&g->_mark.v, m.v.counter, new.v.counter);

	if (ca->buckets_written)
		set_bit(b, ca->buckets_written);

	return 0;
}

int bch2_alloc_write(struct bch_fs *c, bool nowait, bool *wrote)
{
	struct bch_dev *ca;
	unsigned i;
	int ret = 0;

	*wrote = false;

	for_each_rw_member(ca, c, i) {
		struct btree_trans trans;
		struct btree_iter *iter;
		struct bucket_array *buckets;
		size_t b;

		bch2_trans_init(&trans, c);

		iter = bch2_trans_get_iter(&trans, BTREE_ID_ALLOC, POS_MIN,
					   BTREE_ITER_SLOTS|BTREE_ITER_INTENT);

		down_read(&ca->bucket_lock);
		buckets = bucket_array(ca);

		for (b = buckets->first_bucket;
		     b < buckets->nbuckets;
		     b++) {
			if (!buckets->b[b].mark.dirty)
				continue;

			ret = __bch2_alloc_write_key(&trans, ca, b, iter, NULL,
						     nowait
						     ? BTREE_INSERT_NOWAIT
						     : 0);
			if (ret)
				break;

			*wrote = true;
		}
		up_read(&ca->bucket_lock);

		bch2_trans_exit(&trans);

		if (ret) {
			percpu_ref_put(&ca->io_ref);
			break;
		}
	}

	return ret;
}

/* Bucket IO clocks: */

static void bch2_recalc_oldest_io(struct bch_fs *c, struct bch_dev *ca, int rw)
{
	struct bucket_clock *clock = &c->bucket_clock[rw];
	struct bucket_array *buckets = bucket_array(ca);
	struct bucket *g;
	u16 max_last_io = 0;
	unsigned i;

	lockdep_assert_held(&c->bucket_clock[rw].lock);

	/* Recalculate max_last_io for this device: */
	for_each_bucket(g, buckets)
		max_last_io = max(max_last_io, bucket_last_io(c, g, rw));

	ca->max_last_bucket_io[rw] = max_last_io;

	/* Recalculate global max_last_io: */
	max_last_io = 0;

	for_each_member_device(ca, c, i)
		max_last_io = max(max_last_io, ca->max_last_bucket_io[rw]);

	clock->max_last_io = max_last_io;
}
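/*
 * Bucket IO times are stored as 16-bit offsets from the bucket clock hand;
 * when the hand can't advance any further without overflowing, the age of
 * every bucket's last IO is halved so the hand can keep moving.
 */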
static void bch2_rescale_bucket_io_times(struct bch_fs *c, int rw)
{
	struct bucket_clock *clock = &c->bucket_clock[rw];
	struct bucket_array *buckets;
	struct bch_dev *ca;
	struct bucket *g;
	unsigned i;

	trace_rescale_prios(c);

	for_each_member_device(ca, c, i) {
		down_read(&ca->bucket_lock);
		buckets = bucket_array(ca);

		for_each_bucket(g, buckets)
			g->io_time[rw] = clock->hand -
				bucket_last_io(c, g, rw) / 2;

		bch2_recalc_oldest_io(c, ca, rw);

		up_read(&ca->bucket_lock);
	}
}

static inline u64 bucket_clock_freq(u64 capacity)
{
	return max(capacity >> 10, 2028ULL);
}

static void bch2_inc_clock_hand(struct io_timer *timer)
{
	struct bucket_clock *clock = container_of(timer,
					struct bucket_clock, rescale);
	struct bch_fs *c = container_of(clock,
					struct bch_fs, bucket_clock[clock->rw]);
	struct bch_dev *ca;
	u64 capacity;
	unsigned i;

	mutex_lock(&clock->lock);

	/* if clock cannot be advanced more, rescale prio */
	if (clock->max_last_io >= U16_MAX - 2)
		bch2_rescale_bucket_io_times(c, clock->rw);

	BUG_ON(clock->max_last_io >= U16_MAX - 2);

	for_each_member_device(ca, c, i)
		ca->max_last_bucket_io[clock->rw]++;
	clock->max_last_io++;
	clock->hand++;

	mutex_unlock(&clock->lock);

	capacity = READ_ONCE(c->capacity);

	if (!capacity)
		return;
	/*
	 * we only advance the clock hand after 0.1% of the filesystem capacity
	 * has been read or written to - that's what determines when it's time
	 *
	 * XXX: we shouldn't really be going off of the capacity of devices in
	 * RW mode (that will be 0 when we're RO, yet we can still service
	 * reads)
	 */
	timer->expire += bucket_clock_freq(capacity);

	bch2_io_timer_add(&c->io_clock[clock->rw], timer);
}

static void bch2_bucket_clock_init(struct bch_fs *c, int rw)
{
	struct bucket_clock *clock = &c->bucket_clock[rw];

	clock->hand = 1;
	clock->rw = rw;
	clock->rescale.fn = bch2_inc_clock_hand;
	clock->rescale.expire = bucket_clock_freq(c->capacity);
	mutex_init(&clock->lock);
}

/* Background allocator thread: */

/*
 * Scans for buckets to be invalidated, invalidates them, rewrites prios/gens
 * (marking them as invalidated on disk), then optionally issues discard
 * commands to the newly free buckets, then puts them on the various freelists.
 */

#define BUCKET_GC_GEN_MAX	96U

/**
 * wait_buckets_available - wait on reclaimable buckets
 *
 * If there aren't enough available buckets to fill up free_inc, wait until
 * there are.
 */
static int wait_buckets_available(struct bch_fs *c, struct bch_dev *ca)
{
	unsigned long gc_count = c->gc_count;
	int ret = 0;

	while (1) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (kthread_should_stop()) {
			ret = 1;
			break;
		}

		if (gc_count != c->gc_count)
			ca->inc_gen_really_needs_gc = 0;

		if ((ssize_t) (dev_buckets_available(c, ca) -
			       ca->inc_gen_really_needs_gc) >=
		    (ssize_t) fifo_free(&ca->free_inc))
			break;

		up_read(&c->gc_lock);
		schedule();
		try_to_freeze();
		down_read(&c->gc_lock);
	}

	__set_current_state(TASK_RUNNING);
	return ret;
}

static bool bch2_can_invalidate_bucket(struct bch_dev *ca,
				       size_t bucket,
				       struct bucket_mark mark)
{
	u8 gc_gen;

	if (!is_available_bucket(mark))
		return false;

	if (ca->buckets_nouse &&
	    test_bit(bucket, ca->buckets_nouse))
		return false;

	gc_gen = bucket_gc_gen(ca, bucket);

	if (gc_gen >= BUCKET_GC_GEN_MAX / 2)
		ca->inc_gen_needs_gc++;

	if (gc_gen >= BUCKET_GC_GEN_MAX)
		ca->inc_gen_really_needs_gc++;

	return gc_gen < BUCKET_GC_GEN_MAX;
}

/*
 * Determines what order we're going to reuse buckets, smallest bucket_key()
 * first.
 *
 * - We take into account the read prio of the bucket, which gives us an
 *   indication of how hot the data is -- we scale the prio so that the prio
 *   farthest from the clock is worth 1/8th of the closest.
 *
 * - The number of sectors of cached data in the bucket, which gives us an
 *   indication of the cost in cache misses this eviction will cause.
 *
 * - If hotness * sectors used compares equal, we pick the bucket with the
 *   smallest bucket_gc_gen() - since incrementing the same bucket's generation
 *   number repeatedly forces us to run mark and sweep gc to avoid generation
 *   number wraparound.
 */

static unsigned long bucket_sort_key(struct bch_fs *c, struct bch_dev *ca,
				     size_t b, struct bucket_mark m)
{
	unsigned last_io = bucket_last_io(c, bucket(ca, b), READ);
	unsigned max_last_io = ca->max_last_bucket_io[READ];

	/*
	 * Time since last read, scaled to [0, 8) where larger value indicates
	 * more recently read data:
	 */
	unsigned long hotness = (max_last_io - last_io) * 7 / max_last_io;

	/* How much we want to keep the data in this bucket: */
	unsigned long data_wantness =
		(hotness + 1) * bucket_sectors_used(m);

	unsigned long needs_journal_commit =
		bucket_needs_journal_commit(m, c->journal.last_seq_ondisk);

	return  (data_wantness << 9) |
		(needs_journal_commit << 8) |
		(bucket_gc_gen(ca, b) / 16);
}

static inline int bucket_alloc_cmp(alloc_heap *h,
				   struct alloc_heap_entry l,
				   struct alloc_heap_entry r)
{
	return (l.key > r.key) - (l.key < r.key) ?:
		(l.nr < r.nr) - (l.nr > r.nr) ?:
		(l.bucket > r.bucket) - (l.bucket < r.bucket);
}

static inline int bucket_idx_cmp(const void *_l, const void *_r)
{
	const struct alloc_heap_entry *l = _l, *r = _r;

	return (l->bucket > r->bucket) - (l->bucket < r->bucket);
}

static void find_reclaimable_buckets_lru(struct bch_fs *c, struct bch_dev *ca)
{
	struct bucket_array *buckets;
	struct alloc_heap_entry e = { 0 };
	size_t b, i, nr = 0;

	ca->alloc_heap.used = 0;

	mutex_lock(&c->bucket_clock[READ].lock);
	down_read(&ca->bucket_lock);

	buckets = bucket_array(ca);

	bch2_recalc_oldest_io(c, ca, READ);

	/*
	 * Find buckets with lowest read priority, by building a maxheap sorted
	 * by read priority and repeatedly replacing the maximum element until
	 * all buckets have been visited.
	 */
	for (b = ca->mi.first_bucket; b < ca->mi.nbuckets; b++) {
		struct bucket_mark m = READ_ONCE(buckets->b[b].mark);
		unsigned long key = bucket_sort_key(c, ca, b, m);

		if (!bch2_can_invalidate_bucket(ca, b, m))
			continue;

		if (e.nr && e.bucket + e.nr == b && e.key == key) {
			e.nr++;
		} else {
			if (e.nr)
				heap_add_or_replace(&ca->alloc_heap, e,
						    -bucket_alloc_cmp, NULL);

			e = (struct alloc_heap_entry) {
				.bucket = b,
				.nr = 1,
				.key = key,
			};
		}

		cond_resched();
	}

	if (e.nr)
		heap_add_or_replace(&ca->alloc_heap, e,
				    -bucket_alloc_cmp, NULL);

	for (i = 0; i < ca->alloc_heap.used; i++)
		nr += ca->alloc_heap.data[i].nr;

	while (nr - ca->alloc_heap.data[0].nr >= ALLOC_SCAN_BATCH(ca)) {
		nr -= ca->alloc_heap.data[0].nr;
		heap_pop(&ca->alloc_heap, e, -bucket_alloc_cmp, NULL);
	}

	up_read(&ca->bucket_lock);
	mutex_unlock(&c->bucket_clock[READ].lock);
}

static void find_reclaimable_buckets_fifo(struct bch_fs *c, struct bch_dev *ca)
{
	struct bucket_array *buckets = bucket_array(ca);
	struct bucket_mark m;
	size_t b, start;

	if (ca->fifo_last_bucket < ca->mi.first_bucket ||
	    ca->fifo_last_bucket >= ca->mi.nbuckets)
		ca->fifo_last_bucket = ca->mi.first_bucket;

	start = ca->fifo_last_bucket;

	do {
		ca->fifo_last_bucket++;
		if (ca->fifo_last_bucket == ca->mi.nbuckets)
			ca->fifo_last_bucket = ca->mi.first_bucket;

		b = ca->fifo_last_bucket;
		m = READ_ONCE(buckets->b[b].mark);

		if (bch2_can_invalidate_bucket(ca, b, m)) {
			struct alloc_heap_entry e = { .bucket = b, .nr = 1, };

			heap_add(&ca->alloc_heap, e, bucket_alloc_cmp, NULL);
			if (heap_full(&ca->alloc_heap))
				break;
		}

		cond_resched();
	} while (ca->fifo_last_bucket != start);
}

static void find_reclaimable_buckets_random(struct bch_fs *c, struct bch_dev *ca)
{
	struct bucket_array *buckets = bucket_array(ca);
	struct bucket_mark m;
	size_t checked, i;

	for (checked = 0;
	     checked < ca->mi.nbuckets / 2;
	     checked++) {
		size_t b = bch2_rand_range(ca->mi.nbuckets -
					   ca->mi.first_bucket) +
			ca->mi.first_bucket;

		m = READ_ONCE(buckets->b[b].mark);

		if (bch2_can_invalidate_bucket(ca, b, m)) {
			struct alloc_heap_entry e = { .bucket = b, .nr = 1, };

			heap_add(&ca->alloc_heap, e, bucket_alloc_cmp, NULL);
			if (heap_full(&ca->alloc_heap))
				break;
		}

		cond_resched();
	}

	sort(ca->alloc_heap.data,
	     ca->alloc_heap.used,
	     sizeof(ca->alloc_heap.data[0]),
	     bucket_idx_cmp, NULL);

	/* remove duplicates: */
	for (i = 0; i + 1 < ca->alloc_heap.used; i++)
		if (ca->alloc_heap.data[i].bucket ==
		    ca->alloc_heap.data[i + 1].bucket)
			ca->alloc_heap.data[i].nr = 0;
}

static size_t find_reclaimable_buckets(struct bch_fs *c, struct bch_dev *ca)
{
	size_t i, nr = 0;

	ca->inc_gen_needs_gc = 0;

	switch (ca->mi.replacement) {
	case CACHE_REPLACEMENT_LRU:
		find_reclaimable_buckets_lru(c, ca);
		break;
	case CACHE_REPLACEMENT_FIFO:
		find_reclaimable_buckets_fifo(c, ca);
		break;
	case CACHE_REPLACEMENT_RANDOM:
		find_reclaimable_buckets_random(c, ca);
		break;
	}

	heap_resort(&ca->alloc_heap, bucket_alloc_cmp, NULL);

	for (i = 0; i < ca->alloc_heap.used; i++)
		nr += ca->alloc_heap.data[i].nr;

	return nr;
}

static inline long next_alloc_bucket(struct bch_dev *ca)
{
	struct alloc_heap_entry e, *top = ca->alloc_heap.data;

	while (ca->alloc_heap.used) {
		if (top->nr) {
			size_t b = top->bucket;

			top->bucket++;
			top->nr--;
			return b;
		}

		heap_pop(&ca->alloc_heap, e, bucket_alloc_cmp, NULL);
	}

	return -1;
}

/*
 * returns sequence number of most recent journal entry that updated this
 * bucket:
 */
static u64 bucket_journal_seq(struct bch_fs *c, struct bucket_mark m)
{
	if (m.journal_seq_valid) {
		u64 journal_seq = atomic64_read(&c->journal.seq);
		u64 bucket_seq = journal_seq;

		bucket_seq &= ~((u64) U16_MAX);
		bucket_seq |= m.journal_seq;

		if (bucket_seq > journal_seq)
			bucket_seq -= 1 << 16;

		return bucket_seq;
	} else {
		return 0;
	}
}
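/*
 * Invalidate the bucket at the head of ca->alloc_heap: push it onto free_inc
 * and mark it as owned by the allocator, then rewrite its alloc key with the
 * generation number bumped and the sector counts cleared. If the btree update
 * fails, the bucket is taken back off free_inc.
 */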
static int bch2_invalidate_one_bucket2(struct btree_trans *trans,
				       struct bch_dev *ca,
				       struct btree_iter *iter,
				       u64 *journal_seq, unsigned flags)
{
#if 0
	__BKEY_PADDED(k, BKEY_ALLOC_VAL_U64s_MAX) alloc_key;
#else
	/* hack: */
	__BKEY_PADDED(k, 8) alloc_key;
#endif
	struct bch_fs *c = trans->c;
	struct bkey_i_alloc *a;
	struct bkey_alloc_unpacked u;
	struct bucket_mark m;
	struct bkey_s_c k;
	bool invalidating_cached_data;
	size_t b;
	int ret;

	BUG_ON(!ca->alloc_heap.used ||
	       !ca->alloc_heap.data[0].nr);
	b = ca->alloc_heap.data[0].bucket;

	/* first, put on free_inc and mark as owned by allocator: */
	percpu_down_read(&c->mark_lock);
	spin_lock(&c->freelist_lock);

	verify_not_on_freelist(c, ca, b);

	BUG_ON(!fifo_push(&ca->free_inc, b));

	bch2_mark_alloc_bucket(c, ca, b, true, gc_pos_alloc(c, NULL), 0);
	m = bucket(ca, b)->mark;

	spin_unlock(&c->freelist_lock);
	percpu_up_read(&c->mark_lock);

	bch2_btree_iter_cond_resched(iter);

	BUG_ON(BKEY_ALLOC_VAL_U64s_MAX > 8);

	bch2_btree_iter_set_pos(iter, POS(ca->dev_idx, b));
retry:
	k = bch2_btree_iter_peek_slot(iter);
	ret = btree_iter_err(k);
	if (ret)
		return ret;

	if (k.k && k.k->type == KEY_TYPE_alloc)
		u = bch2_alloc_unpack(bkey_s_c_to_alloc(k).v);
	else
		memset(&u, 0, sizeof(u));

	invalidating_cached_data = m.cached_sectors != 0;

	//BUG_ON(u.dirty_sectors);
	u.data_type = 0;
	u.dirty_sectors = 0;
	u.cached_sectors = 0;
	u.read_time = c->bucket_clock[READ].hand;
	u.write_time = c->bucket_clock[WRITE].hand;

	/*
	 * The allocator has to start before journal replay is finished - thus,
	 * we have to trust the in memory bucket @m, not the version in the
	 * btree:
	 */
	u.gen = m.gen + 1;

	a = bkey_alloc_init(&alloc_key.k);
	a->k.p = iter->pos;
	bch2_alloc_pack(a, u);

	bch2_trans_update(trans, BTREE_INSERT_ENTRY(iter, &a->k_i));

	/*
	 * XXX:
	 * when using deferred btree updates, we have journal reclaim doing
	 * btree updates and thus requiring the allocator to make forward
	 * progress, and here the allocator is requiring space in the journal -
	 * so we need a journal pre-reservation:
	 */
	ret = bch2_trans_commit(trans, NULL,
				invalidating_cached_data ? journal_seq : NULL,
				BTREE_INSERT_ATOMIC|
				BTREE_INSERT_NOUNLOCK|
				BTREE_INSERT_NOCHECK_RW|
				BTREE_INSERT_NOFAIL|
				BTREE_INSERT_USE_RESERVE|
				BTREE_INSERT_USE_ALLOC_RESERVE|
				flags);
	if (ret == -EINTR)
		goto retry;

	if (!ret) {
		/* remove from alloc_heap: */
		struct alloc_heap_entry e, *top = ca->alloc_heap.data;

		top->bucket++;
		top->nr--;

		if (!top->nr)
			heap_pop(&ca->alloc_heap, e, bucket_alloc_cmp, NULL);

		/* with btree still locked: */
		if (ca->buckets_written)
			set_bit(b, ca->buckets_written);

		/*
		 * Make sure we flush the last journal entry that updated this
		 * bucket (i.e. deleting the last reference) before writing to
		 * this bucket again:
		 */
		*journal_seq = max(*journal_seq, bucket_journal_seq(c, m));
	} else {
		size_t b2;

		/* remove from free_inc: */
		percpu_down_read(&c->mark_lock);
		spin_lock(&c->freelist_lock);

		bch2_mark_alloc_bucket(c, ca, b, false,
				       gc_pos_alloc(c, NULL), 0);

		BUG_ON(!fifo_pop_back(&ca->free_inc, b2));
		BUG_ON(b != b2);

		spin_unlock(&c->freelist_lock);
		percpu_up_read(&c->mark_lock);
	}

	return ret;
}

static bool bch2_invalidate_one_bucket(struct bch_fs *c, struct bch_dev *ca,
				       size_t bucket, u64 *flush_seq)
{
	struct bucket_mark m;

	percpu_down_read(&c->mark_lock);
	spin_lock(&c->freelist_lock);

	bch2_invalidate_bucket(c, ca, bucket, &m);

	verify_not_on_freelist(c, ca, bucket);
	BUG_ON(!fifo_push(&ca->free_inc, bucket));

	spin_unlock(&c->freelist_lock);

	bucket_io_clock_reset(c, ca, bucket, READ);
	bucket_io_clock_reset(c, ca, bucket, WRITE);

	percpu_up_read(&c->mark_lock);

	*flush_seq = max(*flush_seq, bucket_journal_seq(c, m));

	return m.cached_sectors != 0;
}

/*
 * Pull buckets off ca->alloc_heap, invalidate them, move them to ca->free_inc:
 */
static int bch2_invalidate_buckets(struct bch_fs *c, struct bch_dev *ca)
{
	struct btree_trans trans;
	struct btree_iter *iter;
	u64 journal_seq = 0;
	int ret = 0;

	bch2_trans_init(&trans, c);

	iter = bch2_trans_get_iter(&trans, BTREE_ID_ALLOC,
				   POS(ca->dev_idx, 0),
				   BTREE_ITER_SLOTS|BTREE_ITER_INTENT);

	/* Only use nowait if we've already invalidated at least one bucket: */
	while (!ret &&
	       !fifo_full(&ca->free_inc) &&
	       ca->alloc_heap.used)
		ret = bch2_invalidate_one_bucket2(&trans, ca, iter, &journal_seq,
				BTREE_INSERT_GC_LOCK_HELD|
				(!fifo_empty(&ca->free_inc)
				 ? BTREE_INSERT_NOWAIT : 0));

	bch2_trans_exit(&trans);

	/* If we used NOWAIT, don't return the error: */
	if (!fifo_empty(&ca->free_inc))
		ret = 0;
	if (ret) {
		bch_err(ca, "error invalidating buckets: %i", ret);
		return ret;
	}

	if (journal_seq)
		ret = bch2_journal_flush_seq(&c->journal, journal_seq);
	if (ret) {
		bch_err(ca, "journal error: %i", ret);
		return ret;
	}

	return 0;
}
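/*
 * Move one invalidated bucket from free_inc onto whichever freelist has room,
 * waking anyone waiting on the freelists; if every freelist is full, flag
 * allocator_blocked_full and sleep until space opens up (or the thread is
 * told to stop).
 */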
static int push_invalidated_bucket(struct bch_fs *c, struct bch_dev *ca, size_t bucket)
{
	unsigned i;
	int ret = 0;

	while (1) {
		set_current_state(TASK_INTERRUPTIBLE);

		spin_lock(&c->freelist_lock);
		for (i = 0; i < RESERVE_NR; i++)
			if (fifo_push(&ca->free[i], bucket)) {
				fifo_pop(&ca->free_inc, bucket);

				closure_wake_up(&c->freelist_wait);
				ca->allocator_blocked_full = false;

				spin_unlock(&c->freelist_lock);
				goto out;
			}

		if (!ca->allocator_blocked_full) {
			ca->allocator_blocked_full = true;
			closure_wake_up(&c->freelist_wait);
		}

		spin_unlock(&c->freelist_lock);

		if ((current->flags & PF_KTHREAD) &&
		    kthread_should_stop()) {
			ret = 1;
			break;
		}

		schedule();
		try_to_freeze();
	}
out:
	__set_current_state(TASK_RUNNING);
	return ret;
}

/*
 * Pulls buckets off free_inc, discards them (if enabled), then adds them to
 * freelists, waiting until there's room if necessary:
 */
static int discard_invalidated_buckets(struct bch_fs *c, struct bch_dev *ca)
{
	while (!fifo_empty(&ca->free_inc)) {
		size_t bucket = fifo_peek(&ca->free_inc);

		if (ca->mi.discard &&
		    bdev_max_discard_sectors(ca->disk_sb.bdev))
			blkdev_issue_discard(ca->disk_sb.bdev,
					     bucket_to_sector(ca, bucket),
					     ca->mi.bucket_size, GFP_NOIO);

		if (push_invalidated_bucket(c, ca, bucket))
			return 1;
	}

	return 0;
}
/**
 * bch2_allocator_thread - move buckets from free_inc to reserves
 *
 * The free_inc FIFO is populated by find_reclaimable_buckets(), and
 * the reserves are depleted by bucket allocation. When we run out
 * of free_inc, try to invalidate some buckets and write out
 * prios and gens.
 */
static int bch2_allocator_thread(void *arg)
{
	struct bch_dev *ca = arg;
	struct bch_fs *c = ca->fs;
	size_t nr;
	int ret;

	set_freezable();

	while (1) {
		cond_resched();

		pr_debug("discarding %zu invalidated buckets",
			 fifo_used(&ca->free_inc));

		ret = discard_invalidated_buckets(c, ca);
		if (ret)
			goto stop;

		down_read(&c->gc_lock);

		ret = bch2_invalidate_buckets(c, ca);
		if (ret) {
			up_read(&c->gc_lock);
			goto stop;
		}

		if (!fifo_empty(&ca->free_inc)) {
			up_read(&c->gc_lock);
			continue;
		}

		pr_debug("free_inc now empty");

		do {
			/*
			 * Find some buckets that we can invalidate, either
			 * they're completely unused, or only contain clean data
			 * that's been written back to the backing device or
			 * another cache tier
			 */

			pr_debug("scanning for reclaimable buckets");

			nr = find_reclaimable_buckets(c, ca);

			pr_debug("found %zu buckets", nr);

			trace_alloc_batch(ca, nr, ca->alloc_heap.size);

			if ((ca->inc_gen_needs_gc >= ALLOC_SCAN_BATCH(ca) ||
			     ca->inc_gen_really_needs_gc) &&
			    c->gc_thread) {
				atomic_inc(&c->kick_gc);
				wake_up_process(c->gc_thread);
			}

			/*
			 * If we found any buckets, we have to invalidate them
			 * before we scan for more - but if we didn't find very
			 * many we may want to wait on more buckets being
			 * available so we don't spin:
			 */
			if (!nr ||
			    (nr < ALLOC_SCAN_BATCH(ca) &&
			     !fifo_full(&ca->free[RESERVE_MOVINGGC]))) {
				ca->allocator_blocked = true;
				closure_wake_up(&c->freelist_wait);

				ret = wait_buckets_available(c, ca);
				if (ret) {
					up_read(&c->gc_lock);
					goto stop;
				}
			}
		} while (!nr);

		ca->allocator_blocked = false;
		up_read(&c->gc_lock);

		pr_debug("%zu buckets to invalidate", nr);

		/*
		 * alloc_heap is now full of newly-invalidated buckets: next,
		 * write out the new bucket gens:
		 */
	}

stop:
	pr_debug("alloc thread stopping (ret %i)", ret);
	return 0;
}

/* Startup/shutdown (ro/rw): */
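/*
 * Recompute c->capacity from the devices currently in the RW allocation
 * groups: total usable sectors minus a reserve, which is the larger of the
 * per-device reserves (freelists plus a few write points) and the GC reserve.
 */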
void bch2_recalc_capacity(struct bch_fs *c)
{
	struct bch_dev *ca;
	u64 capacity = 0, reserved_sectors = 0, gc_reserve;
	unsigned bucket_size_max = 0;
	unsigned long ra_pages = 0;
	unsigned i, j;

	lockdep_assert_held(&c->state_lock);

	for_each_online_member(ca, c, i) {
		struct backing_dev_info *bdi = ca->disk_sb.bdev->bd_disk->bdi;

		ra_pages += bdi->ra_pages;
	}

	bch2_set_ra_pages(c, ra_pages);

	for_each_rw_member(ca, c, i) {
		u64 dev_reserve = 0;

		/*
		 * We need to reserve buckets (from the number
		 * of currently available buckets) against
		 * foreground writes so that mainly copygc can
		 * make forward progress.
		 *
		 * We need enough to refill the various reserves
		 * from scratch - copygc will use its entire
		 * reserve all at once, then run again when
		 * its reserve is refilled (from the formerly
		 * available buckets).
		 *
		 * This reserve is just used when considering if
		 * allocations for foreground writes must wait -
		 * not -ENOSPC calculations.
		 */
		for (j = 0; j < RESERVE_NONE; j++)
			dev_reserve += ca->free[j].size;

		dev_reserve += 1;	/* btree write point */
		dev_reserve += 1;	/* copygc write point */
		dev_reserve += 1;	/* rebalance write point */

		dev_reserve *= ca->mi.bucket_size;

		ca->copygc_threshold = dev_reserve;

		capacity += bucket_to_sector(ca, ca->mi.nbuckets -
					     ca->mi.first_bucket);

		reserved_sectors += dev_reserve * 2;

		bucket_size_max = max_t(unsigned, bucket_size_max,
					ca->mi.bucket_size);
	}

	gc_reserve = c->opts.gc_reserve_bytes
		? c->opts.gc_reserve_bytes >> 9
		: div64_u64(capacity * c->opts.gc_reserve_percent, 100);

	reserved_sectors = max(gc_reserve, reserved_sectors);

	reserved_sectors = min(reserved_sectors, capacity);

	c->capacity = capacity - reserved_sectors;

	c->bucket_size_max = bucket_size_max;

	if (c->capacity) {
		bch2_io_timer_add(&c->io_clock[READ],
				  &c->bucket_clock[READ].rescale);
		bch2_io_timer_add(&c->io_clock[WRITE],
				  &c->bucket_clock[WRITE].rescale);
	} else {
		bch2_io_timer_del(&c->io_clock[READ],
				  &c->bucket_clock[READ].rescale);
		bch2_io_timer_del(&c->io_clock[WRITE],
				  &c->bucket_clock[WRITE].rescale);
	}
	/* Wake up in case someone was waiting for buckets */
	closure_wake_up(&c->freelist_wait);
}

static bool bch2_dev_has_open_write_point(struct bch_fs *c, struct bch_dev *ca)
{
	struct open_bucket *ob;
	bool ret = false;

	for (ob = c->open_buckets;
	     ob < c->open_buckets + ARRAY_SIZE(c->open_buckets);
	     ob++) {
		spin_lock(&ob->lock);
		if (ob->valid && !ob->on_partial_list &&
		    ob->ptr.dev == ca->dev_idx)
			ret = true;
		spin_unlock(&ob->lock);
	}

	return ret;
}

/* device goes ro: */
void bch2_dev_allocator_remove(struct bch_fs *c, struct bch_dev *ca)
{
	unsigned i;

	BUG_ON(ca->alloc_thread);

	/* First, remove device from allocation groups: */

	for (i = 0; i < ARRAY_SIZE(c->rw_devs); i++)
		clear_bit(ca->dev_idx, c->rw_devs[i].d);

	/*
	 * Capacity is calculated based off of devices in allocation groups:
	 */
	bch2_recalc_capacity(c);

	/* Next, close write points that point to this device... */
	for (i = 0; i < ARRAY_SIZE(c->write_points); i++)
		bch2_writepoint_stop(c, ca, &c->write_points[i]);

	bch2_writepoint_stop(c, ca, &ca->copygc_write_point);
	bch2_writepoint_stop(c, ca, &c->rebalance_write_point);
	bch2_writepoint_stop(c, ca, &c->btree_write_point);

	mutex_lock(&c->btree_reserve_cache_lock);
	while (c->btree_reserve_cache_nr) {
		struct btree_alloc *a =
			&c->btree_reserve_cache[--c->btree_reserve_cache_nr];

		bch2_open_buckets_put(c, &a->ob);
	}
	mutex_unlock(&c->btree_reserve_cache_lock);

	while (1) {
		struct open_bucket *ob;

		spin_lock(&c->freelist_lock);
		if (!ca->open_buckets_partial_nr) {
			spin_unlock(&c->freelist_lock);
			break;
		}
		ob = c->open_buckets +
			ca->open_buckets_partial[--ca->open_buckets_partial_nr];
		ob->on_partial_list = false;
		spin_unlock(&c->freelist_lock);

		bch2_open_bucket_put(c, ob);
	}

	bch2_ec_stop_dev(c, ca);

	/*
	 * Wake up threads that were blocked on allocation, so they can notice
	 * the device can no longer be removed and the capacity has changed:
	 */
	closure_wake_up(&c->freelist_wait);

	/*
	 * journal_res_get() can block waiting for free space in the journal -
	 * it needs to notice there may not be devices to allocate from anymore:
	 */
	wake_up(&c->journal.wait);

	/* Now wait for any in flight writes: */

	closure_wait_event(&c->open_buckets_wait,
			   !bch2_dev_has_open_write_point(c, ca));
}

/* device goes rw: */
void bch2_dev_allocator_add(struct bch_fs *c, struct bch_dev *ca)
{
	unsigned i;

	for (i = 0; i < ARRAY_SIZE(c->rw_devs); i++)
		if (ca->mi.data_allowed & (1 << i))
			set_bit(ca->dev_idx, c->rw_devs[i].d);
}

void bch2_dev_allocator_quiesce(struct bch_fs *c, struct bch_dev *ca)
{
	if (ca->alloc_thread)
		closure_wait_event(&c->freelist_wait, ca->allocator_blocked_full);
}

/* stop allocator thread: */
void bch2_dev_allocator_stop(struct bch_dev *ca)
{
	struct task_struct *p;

	p = rcu_dereference_protected(ca->alloc_thread, 1);
	ca->alloc_thread = NULL;

	/*
	 * We need an rcu barrier between setting ca->alloc_thread = NULL and
	 * the thread shutting down to avoid bch2_wake_allocator() racing:
	 *
	 * XXX: it would be better to have the rcu barrier be asynchronous
	 * instead of blocking us here
	 */
	synchronize_rcu();

	if (p) {
		kthread_stop(p);
		put_task_struct(p);
	}
}

/* start allocator thread: */
int bch2_dev_allocator_start(struct bch_dev *ca)
{
	struct task_struct *p;

	/*
	 * allocator thread already started?
	 */
	if (ca->alloc_thread)
		return 0;

	p = kthread_create(bch2_allocator_thread, ca,
			   "bch_alloc[%s]", ca->name);
	if (IS_ERR(p))
		return PTR_ERR(p);

	get_task_struct(p);
	rcu_assign_pointer(ca->alloc_thread, p);
	wake_up_process(p);
	return 0;
}
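/*
 * Flush btree node writes that were held back during allocator startup;
 * returns true once no nodes remain unwritten and no interior btree updates
 * are still pending.
 */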
static bool flush_held_btree_writes(struct bch_fs *c)
{
	struct bucket_table *tbl;
	struct rhash_head *pos;
	struct btree *b;
	bool nodes_unwritten;
	size_t i;
again:
	cond_resched();
	nodes_unwritten = false;

	rcu_read_lock();
	for_each_cached_btree(b, c, tbl, i, pos)
		if (btree_node_need_write(b)) {
			if (btree_node_may_write(b)) {
				rcu_read_unlock();
				btree_node_lock_type(c, b, SIX_LOCK_read);
				bch2_btree_node_write(c, b, SIX_LOCK_read);
				six_unlock_read(&b->lock);
				goto again;
			} else {
				nodes_unwritten = true;
			}
		}
	rcu_read_unlock();

	if (c->btree_roots_dirty) {
		bch2_journal_meta(&c->journal);
		goto again;
	}

	return !nodes_unwritten &&
		!bch2_btree_interior_updates_nr_pending(c);
}

static void allocator_start_issue_discards(struct bch_fs *c)
{
	struct bch_dev *ca;
	unsigned dev_iter;
	size_t bu;

	for_each_rw_member(ca, c, dev_iter)
		while (fifo_pop(&ca->free_inc, bu))
			blkdev_issue_discard(ca->disk_sb.bdev,
					     bucket_to_sector(ca, bu),
					     ca->mi.bucket_size, GFP_NOIO);
}
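/*
 * During startup we may queue up more buckets than free_inc was sized for;
 * double its capacity (moving the existing entries over) when it fills up.
 */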
static int resize_free_inc(struct bch_dev *ca)
{
	alloc_fifo free_inc;

	if (!fifo_full(&ca->free_inc))
		return 0;

	if (!init_fifo(&free_inc,
		       ca->free_inc.size * 2,
		       GFP_KERNEL))
		return -ENOMEM;

	fifo_move(&free_inc, &ca->free_inc);
	swap(free_inc, ca->free_inc);
	free_fifo(&free_inc);
	return 0;
}
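/*
 * Fast path for allocator startup: scan for buckets that are already unused
 * and invalidated on disk, push them straight onto the freelists (no btree
 * updates needed), and report whether that was enough to fill every device's
 * RESERVE_BTREE freelist.
 */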
static bool bch2_fs_allocator_start_fast(struct bch_fs *c)
{
	struct bch_dev *ca;
	unsigned dev_iter;
	bool ret = true;

	if (test_alloc_startup(c))
		return false;

	down_read(&c->gc_lock);

	/* Scan for buckets that are already invalidated: */
	for_each_rw_member(ca, c, dev_iter) {
		struct bucket_array *buckets;
		struct bucket_mark m;
		long bu;

		down_read(&ca->bucket_lock);
		buckets = bucket_array(ca);

		for (bu = buckets->first_bucket;
		     bu < buckets->nbuckets; bu++) {
			m = READ_ONCE(buckets->b[bu].mark);

			if (!buckets->b[bu].gen_valid ||
			    !is_available_bucket(m) ||
			    m.cached_sectors ||
			    (ca->buckets_nouse &&
			     test_bit(bu, ca->buckets_nouse)))
				continue;

			percpu_down_read(&c->mark_lock);
			bch2_mark_alloc_bucket(c, ca, bu, true,
					       gc_pos_alloc(c, NULL), 0);
			percpu_up_read(&c->mark_lock);

			fifo_push(&ca->free_inc, bu);

			discard_invalidated_buckets(c, ca);

			if (fifo_full(&ca->free[RESERVE_BTREE]))
				break;
		}
		up_read(&ca->bucket_lock);
	}

	up_read(&c->gc_lock);

	/* did we find enough buckets? */
	for_each_rw_member(ca, c, dev_iter)
		if (!fifo_full(&ca->free[RESERVE_BTREE]))
			ret = false;

	return ret;
}

int bch2_fs_allocator_start(struct bch_fs *c)
{
	struct bch_dev *ca;
	unsigned dev_iter;
	u64 journal_seq = 0;
	bool wrote;
	long bu;
	int ret = 0;

	if (!test_alloc_startup(c) &&
	    bch2_fs_allocator_start_fast(c))
		return 0;

	pr_debug("not enough empty buckets; scanning for reclaimable buckets");

	/*
	 * We're moving buckets to freelists _before_ they've been marked as
	 * invalidated on disk - we have to so that we can allocate new btree
	 * nodes to mark them as invalidated on disk.
	 *
	 * However, we can't _write_ to any of these buckets yet - they might
	 * have cached data in them, which is live until they're marked as
	 * invalidated on disk:
	 */
	set_bit(BCH_FS_HOLD_BTREE_WRITES, &c->flags);

	down_read(&c->gc_lock);
	do {
		wrote = false;

		for_each_rw_member(ca, c, dev_iter) {
			find_reclaimable_buckets(c, ca);

			while (!fifo_full(&ca->free[RESERVE_BTREE]) &&
			       (bu = next_alloc_bucket(ca)) >= 0) {
				ret = resize_free_inc(ca);
				if (ret) {
					percpu_ref_put(&ca->io_ref);
					up_read(&c->gc_lock);
					goto err;
				}

				bch2_invalidate_one_bucket(c, ca, bu,
							   &journal_seq);

				fifo_push(&ca->free[RESERVE_BTREE], bu);
			}
		}

		pr_debug("done scanning for reclaimable buckets");

		/*
		 * XXX: it's possible for this to deadlock waiting on journal reclaim,
		 * since we're holding btree writes. What then?
		 */
		ret = bch2_alloc_write(c, true, &wrote);

		/*
		 * If bch2_alloc_write() did anything, it may have used some
		 * buckets, and we need the RESERVE_BTREE freelist full - so we
		 * need to loop and scan again.
		 * And if it errored, it may have been because there weren't
		 * enough buckets, so just scan and loop again as long as it
		 * made some progress:
		 */
	} while (wrote);
	up_read(&c->gc_lock);

	if (ret)
		goto err;

	pr_debug("flushing journal");

	ret = bch2_journal_flush(&c->journal);
	if (ret)
		goto err;

	pr_debug("issuing discards");
	allocator_start_issue_discards(c);
err:
	clear_bit(BCH_FS_HOLD_BTREE_WRITES, &c->flags);
	closure_wait_event(&c->btree_interior_update_wait,
			   flush_held_btree_writes(c));

	return ret;
}

void bch2_fs_allocator_background_init(struct bch_fs *c)
{
	spin_lock_init(&c->freelist_lock);
	bch2_bucket_clock_init(c, READ);
	bch2_bucket_clock_init(c, WRITE);

	c->pd_controllers_update_seconds = 5;
	INIT_DELAYED_WORK(&c->pd_controllers_update, pd_controllers_update);
}