Commit | Line | Data |
---|---|---|
7b3f84ea | 1 | // SPDX-License-Identifier: GPL-2.0 |
1c6fdbd8 | 2 | #include "bcachefs.h" |
7b3f84ea KO |
3 | #include "alloc_background.h" |
4 | #include "alloc_foreground.h" | |
1c6fdbd8 KO |
5 | #include "btree_cache.h" |
6 | #include "btree_io.h" | |
7 | #include "btree_update.h" | |
8 | #include "btree_update_interior.h" | |
9 | #include "btree_gc.h" | |
10 | #include "buckets.h" | |
1c6fdbd8 KO |
11 | #include "clock.h" |
12 | #include "debug.h" | |
1c6fdbd8 | 13 | #include "error.h" |
1c6fdbd8 | 14 | #include "journal_io.h" |
1c6fdbd8 KO |
15 | #include "trace.h" |
16 | ||
1c6fdbd8 KO |
17 | #include <linux/kthread.h> |
18 | #include <linux/math64.h> | |
19 | #include <linux/random.h> | |
20 | #include <linux/rculist.h> | |
21 | #include <linux/rcupdate.h> | |
22 | #include <linux/sched/task.h> | |
23 | #include <linux/sort.h> | |
24 | ||
25 | static void bch2_recalc_oldest_io(struct bch_fs *, struct bch_dev *, int); | |
26 | ||
27 | /* Ratelimiting/PD controllers */ | |
28 | ||
29 | static void pd_controllers_update(struct work_struct *work) | |
30 | { | |
31 | struct bch_fs *c = container_of(to_delayed_work(work), | |
32 | struct bch_fs, | |
33 | pd_controllers_update); | |
34 | struct bch_dev *ca; | |
35 | unsigned i; | |
36 | ||
37 | for_each_member_device(ca, c, i) { | |
38 | struct bch_dev_usage stats = bch2_dev_usage_read(c, ca); | |
39 | ||
40 | u64 free = bucket_to_sector(ca, | |
41 | __dev_buckets_free(ca, stats)) << 9; | |
42 | /* | |
43 | * Bytes of internal fragmentation, which can be | |
44 | * reclaimed by copy GC | |
45 | */ | |
46 | s64 fragmented = (bucket_to_sector(ca, | |
47 | stats.buckets[BCH_DATA_USER] + | |
48 | stats.buckets[BCH_DATA_CACHED]) - | |
49 | (stats.sectors[BCH_DATA_USER] + | |
50 | stats.sectors[BCH_DATA_CACHED])) << 9; | |
51 | ||
52 | fragmented = max(0LL, fragmented); | |
53 | ||
54 | bch2_pd_controller_update(&ca->copygc_pd, | |
55 | free, fragmented, -1); | |
56 | } | |
57 | ||
58 | schedule_delayed_work(&c->pd_controllers_update, | |
59 | c->pd_controllers_update_seconds * HZ); | |
60 | } | |
61 | ||
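The `fragmented` figure fed to the copygc PD controller above is the number of bytes copygc could reclaim: sectors owned by user/cached buckets minus the sectors actually live in them, clamped at zero. A standalone sketch of that arithmetic with made-up per-device numbers (not bcachefs code; every value here is illustrative):

```c
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* made-up per-device numbers, all in 512-byte sectors */
	uint64_t bucket_size	= 1024;
	uint64_t user_buckets	= 1000, cached_buckets = 200;
	uint64_t user_sectors	= 900000, cached_sectors = 150000;

	/* sectors owned by partially-used buckets minus sectors actually live */
	int64_t fragmented_sectors =
		(int64_t) ((user_buckets + cached_buckets) * bucket_size) -
		(int64_t) (user_sectors + cached_sectors);

	if (fragmented_sectors < 0)
		fragmented_sectors = 0;

	/* convert 512-byte sectors to bytes, as the PD controller expects */
	uint64_t fragmented_bytes = (uint64_t) fragmented_sectors << 9;

	printf("reclaimable by copygc: %llu bytes\n",
	       (unsigned long long) fragmented_bytes);
	return 0;
}
```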
62 | /* Persistent alloc info: */ | |
63 | ||
64 | static unsigned bch_alloc_val_u64s(const struct bch_alloc *a) | |
65 | { | |
66 | unsigned bytes = offsetof(struct bch_alloc, data); | |
67 | ||
68 | if (a->fields & (1 << BCH_ALLOC_FIELD_READ_TIME)) | |
69 | bytes += 2; | |
70 | if (a->fields & (1 << BCH_ALLOC_FIELD_WRITE_TIME)) | |
71 | bytes += 2; | |
72 | ||
73 | return DIV_ROUND_UP(bytes, sizeof(u64)); | |
74 | } | |
75 | ||
76 | const char *bch2_alloc_invalid(const struct bch_fs *c, struct bkey_s_c k) | |
77 | { | |
78 | if (k.k->p.inode >= c->sb.nr_devices || | |
79 | !c->devs[k.k->p.inode]) | |
80 | return "invalid device"; | |
81 | ||
82 | switch (k.k->type) { | |
83 | case BCH_ALLOC: { | |
84 | struct bkey_s_c_alloc a = bkey_s_c_to_alloc(k); | |
85 | ||
86 | if (bch_alloc_val_u64s(a.v) != bkey_val_u64s(a.k)) | |
87 | return "incorrect value size"; | |
88 | break; | |
89 | } | |
90 | default: | |
91 | return "invalid type"; | |
92 | } | |
93 | ||
94 | return NULL; | |
95 | } | |
96 | ||
319f9ac3 KO |
97 | void bch2_alloc_to_text(struct printbuf *out, struct bch_fs *c, |
98 | struct bkey_s_c k) | |
1c6fdbd8 | 99 | { |
1c6fdbd8 | 100 | switch (k.k->type) { |
319f9ac3 KO |
101 | case BCH_ALLOC: { |
102 | struct bkey_s_c_alloc a = bkey_s_c_to_alloc(k); | |
103 | ||
104 | pr_buf(out, "gen %u", a.v->gen); | |
1c6fdbd8 KO |
105 | break; |
106 | } | |
319f9ac3 | 107 | } |
1c6fdbd8 KO |
108 | } |
109 | ||
110 | static inline unsigned get_alloc_field(const u8 **p, unsigned bytes) | |
111 | { | |
112 | unsigned v; | |
113 | ||
114 | switch (bytes) { | |
115 | case 1: | |
116 | v = **p; | |
117 | break; | |
118 | case 2: | |
119 | v = le16_to_cpup((void *) *p); | |
120 | break; | |
121 | case 4: | |
122 | v = le32_to_cpup((void *) *p); | |
123 | break; | |
124 | default: | |
125 | BUG(); | |
126 | } | |
127 | ||
128 | *p += bytes; | |
129 | return v; | |
130 | } | |
131 | ||
132 | static inline void put_alloc_field(u8 **p, unsigned bytes, unsigned v) | |
133 | { | |
134 | switch (bytes) { | |
135 | case 1: | |
136 | **p = v; | |
137 | break; | |
138 | case 2: | |
139 | *((__le16 *) *p) = cpu_to_le16(v); | |
140 | break; | |
141 | case 4: | |
142 | *((__le32 *) *p) = cpu_to_le32(v); | |
143 | break; | |
144 | default: | |
145 | BUG(); | |
146 | } | |
147 | ||
148 | *p += bytes; | |
149 | } | |
150 | ||
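get_alloc_field()/put_alloc_field() pack optional fixed-width fields after the static part of struct bch_alloc, and bch_alloc_val_u64s() sizes the value from the fields bitmap. A minimal userspace sketch of the same round-trip, using invented toy_alloc/put_field16 names and assuming a little-endian host (not bcachefs code):

```c
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define FIELD_READ_TIME		(1 << 0)
#define FIELD_WRITE_TIME	(1 << 1)

struct toy_alloc {
	uint8_t fields;		/* bitmap of optional fields present */
	uint8_t gen;
	uint8_t data[8];	/* packed optional fields follow */
};

static void put_field16(uint8_t **p, uint16_t v)
{
	memcpy(*p, &v, 2);	/* little-endian host assumed for brevity */
	*p += 2;
}

static uint16_t get_field16(const uint8_t **p)
{
	uint16_t v;

	memcpy(&v, *p, 2);
	*p += 2;
	return v;
}

int main(void)
{
	struct toy_alloc a = {
		.fields	= FIELD_READ_TIME | FIELD_WRITE_TIME,
		.gen	= 7,
	};
	uint8_t *out = a.data;
	const uint8_t *in;

	if (a.fields & FIELD_READ_TIME)
		put_field16(&out, 1234);	/* io_time[READ] */
	if (a.fields & FIELD_WRITE_TIME)
		put_field16(&out, 5678);	/* io_time[WRITE] */

	in = a.data;
	if (a.fields & FIELD_READ_TIME)
		printf("read time  %u\n", (unsigned) get_field16(&in));
	if (a.fields & FIELD_WRITE_TIME)
		printf("write time %u\n", (unsigned) get_field16(&in));

	/* value size in u64s, rounding up as bch_alloc_val_u64s() does */
	unsigned bytes = 2 /* fields + gen */ + (unsigned) (out - a.data);
	printf("val u64s: %u\n", (bytes + 7) / 8);
	return 0;
}
```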
151 | static void bch2_alloc_read_key(struct bch_fs *c, struct bkey_s_c k) | |
152 | { | |
153 | struct bch_dev *ca; | |
154 | struct bkey_s_c_alloc a; | |
155 | struct bucket_mark new; | |
156 | struct bucket *g; | |
157 | const u8 *d; | |
158 | ||
159 | if (k.k->type != BCH_ALLOC) | |
160 | return; | |
161 | ||
162 | a = bkey_s_c_to_alloc(k); | |
163 | ca = bch_dev_bkey_exists(c, a.k->p.inode); | |
164 | ||
165 | if (a.k->p.offset >= ca->mi.nbuckets) | |
166 | return; | |
167 | ||
168 | percpu_down_read(&c->usage_lock); | |
169 | ||
170 | g = bucket(ca, a.k->p.offset); | |
171 | bucket_cmpxchg(g, new, ({ | |
172 | new.gen = a.v->gen; | |
173 | new.gen_valid = 1; | |
174 | })); | |
175 | ||
176 | d = a.v->data; | |
177 | if (a.v->fields & (1 << BCH_ALLOC_FIELD_READ_TIME)) | |
178 | g->io_time[READ] = get_alloc_field(&d, 2); | |
179 | if (a.v->fields & (1 << BCH_ALLOC_FIELD_WRITE_TIME)) | |
180 | g->io_time[WRITE] = get_alloc_field(&d, 2); | |
181 | ||
182 | percpu_up_read(&c->usage_lock); | |
183 | } | |
184 | ||
185 | int bch2_alloc_read(struct bch_fs *c, struct list_head *journal_replay_list) | |
186 | { | |
187 | struct journal_replay *r; | |
188 | struct btree_iter iter; | |
189 | struct bkey_s_c k; | |
190 | struct bch_dev *ca; | |
191 | unsigned i; | |
192 | int ret; | |
193 | ||
194 | for_each_btree_key(&iter, c, BTREE_ID_ALLOC, POS_MIN, 0, k) { | |
195 | bch2_alloc_read_key(c, k); | |
196 | bch2_btree_iter_cond_resched(&iter); | |
197 | } | |
198 | ||
199 | ret = bch2_btree_iter_unlock(&iter); | |
200 | if (ret) | |
201 | return ret; | |
202 | ||
203 | list_for_each_entry(r, journal_replay_list, list) { | |
204 | struct bkey_i *k, *n; | |
205 | struct jset_entry *entry; | |
206 | ||
207 | for_each_jset_key(k, n, entry, &r->j) | |
208 | if (entry->btree_id == BTREE_ID_ALLOC) | |
209 | bch2_alloc_read_key(c, bkey_i_to_s_c(k)); | |
210 | } | |
211 | ||
212 | mutex_lock(&c->bucket_clock[READ].lock); | |
213 | for_each_member_device(ca, c, i) { | |
214 | down_read(&ca->bucket_lock); | |
215 | bch2_recalc_oldest_io(c, ca, READ); | |
216 | up_read(&ca->bucket_lock); | |
217 | } | |
218 | mutex_unlock(&c->bucket_clock[READ].lock); | |
219 | ||
220 | mutex_lock(&c->bucket_clock[WRITE].lock); | |
221 | for_each_member_device(ca, c, i) { | |
222 | down_read(&ca->bucket_lock); | |
223 | bch2_recalc_oldest_io(c, ca, WRITE); | |
224 | up_read(&ca->bucket_lock); | |
225 | } | |
226 | mutex_unlock(&c->bucket_clock[WRITE].lock); | |
227 | ||
228 | return 0; | |
229 | } | |
230 | ||
231 | static int __bch2_alloc_write_key(struct bch_fs *c, struct bch_dev *ca, | |
232 | size_t b, struct btree_iter *iter, | |
b29e197a | 233 | u64 *journal_seq, unsigned flags) |
1c6fdbd8 KO |
234 | { |
235 | struct bucket_mark m; | |
236 | __BKEY_PADDED(k, DIV_ROUND_UP(sizeof(struct bch_alloc), 8)) alloc_key; | |
237 | struct bucket *g; | |
238 | struct bkey_i_alloc *a; | |
239 | u8 *d; | |
1c6fdbd8 | 240 | |
b29e197a KO |
241 | percpu_down_read(&c->usage_lock); |
242 | g = bucket(ca, b); | |
243 | ||
244 | m = READ_ONCE(g->mark); | |
245 | a = bkey_alloc_init(&alloc_key.k); | |
246 | a->k.p = POS(ca->dev_idx, b); | |
247 | a->v.fields = 0; | |
248 | a->v.gen = m.gen; | |
249 | set_bkey_val_u64s(&a->k, bch_alloc_val_u64s(&a->v)); | |
250 | ||
251 | d = a->v.data; | |
252 | if (a->v.fields & (1 << BCH_ALLOC_FIELD_READ_TIME)) | |
253 | put_alloc_field(&d, 2, g->io_time[READ]); | |
254 | if (a->v.fields & (1 << BCH_ALLOC_FIELD_WRITE_TIME)) | |
255 | put_alloc_field(&d, 2, g->io_time[WRITE]); | |
256 | percpu_up_read(&c->usage_lock); | |
1c6fdbd8 | 257 | |
b29e197a | 258 | bch2_btree_iter_cond_resched(iter); |
1c6fdbd8 | 259 | |
b29e197a | 260 | bch2_btree_iter_set_pos(iter, a->k.p); |
1c6fdbd8 | 261 | |
fc3268c1 | 262 | return bch2_btree_insert_at(c, NULL, journal_seq, |
b29e197a KO |
263 | BTREE_INSERT_NOFAIL| |
264 | BTREE_INSERT_USE_RESERVE| | |
265 | BTREE_INSERT_USE_ALLOC_RESERVE| | |
266 | flags, | |
267 | BTREE_INSERT_ENTRY(iter, &a->k_i)); | |
1c6fdbd8 KO |
268 | } |
269 | ||
270 | int bch2_alloc_replay_key(struct bch_fs *c, struct bpos pos) | |
271 | { | |
272 | struct bch_dev *ca; | |
273 | struct btree_iter iter; | |
274 | int ret; | |
275 | ||
276 | if (pos.inode >= c->sb.nr_devices || !c->devs[pos.inode]) | |
277 | return 0; | |
278 | ||
279 | ca = bch_dev_bkey_exists(c, pos.inode); | |
280 | ||
281 | if (pos.offset >= ca->mi.nbuckets) | |
282 | return 0; | |
283 | ||
284 | bch2_btree_iter_init(&iter, c, BTREE_ID_ALLOC, POS_MIN, | |
285 | BTREE_ITER_SLOTS|BTREE_ITER_INTENT); | |
286 | ||
b29e197a | 287 | ret = __bch2_alloc_write_key(c, ca, pos.offset, &iter, NULL, 0); |
1c6fdbd8 KO |
288 | bch2_btree_iter_unlock(&iter); |
289 | return ret; | |
290 | } | |
291 | ||
292 | int bch2_alloc_write(struct bch_fs *c) | |
293 | { | |
294 | struct bch_dev *ca; | |
295 | unsigned i; | |
296 | int ret = 0; | |
297 | ||
298 | for_each_rw_member(ca, c, i) { | |
299 | struct btree_iter iter; | |
300 | unsigned long bucket; | |
301 | ||
302 | bch2_btree_iter_init(&iter, c, BTREE_ID_ALLOC, POS_MIN, | |
303 | BTREE_ITER_SLOTS|BTREE_ITER_INTENT); | |
304 | ||
305 | down_read(&ca->bucket_lock); | |
306 | for_each_set_bit(bucket, ca->buckets_dirty, ca->mi.nbuckets) { | |
b29e197a KO |
307 | ret = __bch2_alloc_write_key(c, ca, bucket, |
308 | &iter, NULL, 0); | |
1c6fdbd8 KO |
309 | if (ret) |
310 | break; | |
311 | ||
312 | clear_bit(bucket, ca->buckets_dirty); | |
313 | } | |
314 | up_read(&ca->bucket_lock); | |
315 | bch2_btree_iter_unlock(&iter); | |
316 | ||
317 | if (ret) { | |
318 | percpu_ref_put(&ca->io_ref); | |
319 | break; | |
320 | } | |
321 | } | |
322 | ||
323 | return ret; | |
324 | } | |
325 | ||
326 | /* Bucket IO clocks: */ | |
327 | ||
328 | static void bch2_recalc_oldest_io(struct bch_fs *c, struct bch_dev *ca, int rw) | |
329 | { | |
330 | struct bucket_clock *clock = &c->bucket_clock[rw]; | |
331 | struct bucket_array *buckets = bucket_array(ca); | |
332 | struct bucket *g; | |
333 | u16 max_last_io = 0; | |
334 | unsigned i; | |
335 | ||
336 | lockdep_assert_held(&c->bucket_clock[rw].lock); | |
337 | ||
338 | /* Recalculate max_last_io for this device: */ | |
339 | for_each_bucket(g, buckets) | |
340 | max_last_io = max(max_last_io, bucket_last_io(c, g, rw)); | |
341 | ||
342 | ca->max_last_bucket_io[rw] = max_last_io; | |
343 | ||
344 | /* Recalculate global max_last_io: */ | |
345 | max_last_io = 0; | |
346 | ||
347 | for_each_member_device(ca, c, i) | |
348 | max_last_io = max(max_last_io, ca->max_last_bucket_io[rw]); | |
349 | ||
350 | clock->max_last_io = max_last_io; | |
351 | } | |
352 | ||
353 | static void bch2_rescale_bucket_io_times(struct bch_fs *c, int rw) | |
354 | { | |
355 | struct bucket_clock *clock = &c->bucket_clock[rw]; | |
356 | struct bucket_array *buckets; | |
357 | struct bch_dev *ca; | |
358 | struct bucket *g; | |
359 | unsigned i; | |
360 | ||
361 | trace_rescale_prios(c); | |
362 | ||
363 | for_each_member_device(ca, c, i) { | |
364 | down_read(&ca->bucket_lock); | |
365 | buckets = bucket_array(ca); | |
366 | ||
367 | for_each_bucket(g, buckets) | |
368 | g->io_time[rw] = clock->hand - | |
369 | bucket_last_io(c, g, rw) / 2; | |
370 | ||
371 | bch2_recalc_oldest_io(c, ca, rw); | |
372 | ||
373 | up_read(&ca->bucket_lock); | |
374 | } | |
375 | } | |
376 | ||
8b335bae KO |
377 | static inline u64 bucket_clock_freq(u64 capacity) |
378 | { | |
379 | return max(capacity >> 10, 2028ULL); | |
380 | } | |
381 | ||
1c6fdbd8 KO |
382 | static void bch2_inc_clock_hand(struct io_timer *timer) |
383 | { | |
384 | struct bucket_clock *clock = container_of(timer, | |
385 | struct bucket_clock, rescale); | |
386 | struct bch_fs *c = container_of(clock, | |
387 | struct bch_fs, bucket_clock[clock->rw]); | |
388 | struct bch_dev *ca; | |
389 | u64 capacity; | |
390 | unsigned i; | |
391 | ||
392 | mutex_lock(&clock->lock); | |
393 | ||
394 | /* if clock cannot be advanced more, rescale prio */ | |
395 | if (clock->max_last_io >= U16_MAX - 2) | |
396 | bch2_rescale_bucket_io_times(c, clock->rw); | |
397 | ||
398 | BUG_ON(clock->max_last_io >= U16_MAX - 2); | |
399 | ||
400 | for_each_member_device(ca, c, i) | |
401 | ca->max_last_bucket_io[clock->rw]++; | |
402 | clock->max_last_io++; | |
403 | clock->hand++; | |
404 | ||
405 | mutex_unlock(&clock->lock); | |
406 | ||
407 | capacity = READ_ONCE(c->capacity); | |
408 | ||
409 | if (!capacity) | |
410 | return; | |
411 | ||
412 | /* | |
413 | * we only increment the clock hand when 0.1% of the filesystem capacity |
414 | * has been read or written to - this determines when the timer next fires |
415 | * | |
416 | * XXX: we shouldn't really be going off of the capacity of devices in | |
417 | * RW mode (that will be 0 when we're RO, yet we can still service | |
418 | * reads) | |
419 | */ | |
8b335bae | 420 | timer->expire += bucket_clock_freq(capacity); |
1c6fdbd8 KO |
421 | |
422 | bch2_io_timer_add(&c->io_clock[clock->rw], timer); | |
423 | } | |
424 | ||
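bucket_clock_freq() sets the rescale timer period to roughly 0.1% of the filesystem capacity (with a floor), so the 16-bit bucket clock hand advances slowly relative to IO and rarely needs rescaling. A standalone sketch of that arithmetic for a hypothetical 1 TiB device (not bcachefs code; numbers are illustrative):

```c
#include <stdint.h>
#include <stdio.h>

static uint64_t bucket_clock_freq(uint64_t capacity_sectors)
{
	uint64_t min_period = 2028;			/* floor used by the helper above */
	uint64_t period = capacity_sectors >> 10;	/* ~0.1% of capacity */

	return period > min_period ? period : min_period;
}

int main(void)
{
	/* 1 TiB device: 2^31 sectors of 512 bytes */
	uint64_t capacity = 1ULL << 31;
	uint64_t period = bucket_clock_freq(capacity);

	printf("hand advances every %llu sectors (%llu MiB) of IO\n",
	       (unsigned long long) period,
	       (unsigned long long) (period >> 11));
	return 0;
}
```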
425 | static void bch2_bucket_clock_init(struct bch_fs *c, int rw) | |
426 | { | |
427 | struct bucket_clock *clock = &c->bucket_clock[rw]; | |
428 | ||
429 | clock->hand = 1; | |
430 | clock->rw = rw; | |
431 | clock->rescale.fn = bch2_inc_clock_hand; | |
8b335bae | 432 | clock->rescale.expire = bucket_clock_freq(c->capacity); |
1c6fdbd8 KO |
433 | mutex_init(&clock->lock); |
434 | } | |
435 | ||
436 | /* Background allocator thread: */ | |
437 | ||
438 | /* | |
439 | * Scans for buckets to be invalidated, invalidates them, rewrites prios/gens | |
440 | * (marking them as invalidated on disk), then optionally issues discard | |
441 | * commands to the newly free buckets, then puts them on the various freelists. | |
442 | */ | |
443 | ||
1c6fdbd8 KO |
444 | #define BUCKET_GC_GEN_MAX 96U |
445 | ||
446 | /** | |
447 | * wait_buckets_available - wait on reclaimable buckets | |
448 | * | |
449 | * If there aren't enough available buckets to fill up free_inc, wait until | |
450 | * there are. | |
451 | */ | |
452 | static int wait_buckets_available(struct bch_fs *c, struct bch_dev *ca) | |
453 | { | |
454 | unsigned long gc_count = c->gc_count; | |
455 | int ret = 0; | |
456 | ||
457 | while (1) { | |
458 | set_current_state(TASK_INTERRUPTIBLE); | |
459 | if (kthread_should_stop()) { | |
460 | ret = 1; | |
461 | break; | |
462 | } | |
463 | ||
464 | if (gc_count != c->gc_count) | |
465 | ca->inc_gen_really_needs_gc = 0; | |
466 | ||
467 | if ((ssize_t) (dev_buckets_available(c, ca) - | |
468 | ca->inc_gen_really_needs_gc) >= | |
469 | (ssize_t) fifo_free(&ca->free_inc)) | |
470 | break; | |
471 | ||
472 | up_read(&c->gc_lock); | |
473 | schedule(); | |
474 | try_to_freeze(); | |
475 | down_read(&c->gc_lock); | |
476 | } | |
477 | ||
478 | __set_current_state(TASK_RUNNING); | |
479 | return ret; | |
480 | } | |
481 | ||
482 | static bool bch2_can_invalidate_bucket(struct bch_dev *ca, | |
483 | size_t bucket, | |
484 | struct bucket_mark mark) | |
485 | { | |
486 | u8 gc_gen; | |
487 | ||
488 | if (!is_available_bucket(mark)) | |
489 | return false; | |
490 | ||
491 | gc_gen = bucket_gc_gen(ca, bucket); | |
492 | ||
493 | if (gc_gen >= BUCKET_GC_GEN_MAX / 2) | |
494 | ca->inc_gen_needs_gc++; | |
495 | ||
496 | if (gc_gen >= BUCKET_GC_GEN_MAX) | |
497 | ca->inc_gen_really_needs_gc++; | |
498 | ||
499 | return gc_gen < BUCKET_GC_GEN_MAX; | |
500 | } | |
501 | ||
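bucket_gc_gen() measures how far a bucket's generation has advanced since the last mark-and-sweep pass; because generations are 8-bit, the distance is naturally computed modulo 256, which is why it stays correct across wraparound. A standalone sketch of that wraparound-safe distance (the helper name and inputs below are invented, not bcachefs code):

```c
#include <stdint.h>
#include <stdio.h>

static uint8_t gc_gen(uint8_t gen, uint8_t oldest_gen)
{
	return (uint8_t) (gen - oldest_gen);	/* wraps correctly mod 256 */
}

int main(void)
{
	printf("%u\n", gc_gen(10, 5));		/* 5 */
	printf("%u\n", gc_gen(3, 250));		/* 9: gen wrapped past 255 */
	return 0;
}
```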
1c6fdbd8 KO |
502 | /* |
503 | * Determines what order we're going to reuse buckets, smallest bucket_sort_key() |
504 | * first. | |
505 | * | |
506 | * | |
507 | * - We take into account the read prio of the bucket, which gives us an | |
508 | * indication of how hot the data is -- we scale the prio so that the prio | |
509 | * farthest from the clock is worth 1/8th of the closest. | |
510 | * | |
511 | * - The number of sectors of cached data in the bucket, which gives us an | |
512 | * indication of the cost in cache misses this eviction will cause. | |
513 | * | |
514 | * - If hotness * sectors used compares equal, we pick the bucket with the | |
515 | * smallest bucket_gc_gen() - since incrementing the same bucket's generation | |
516 | * number repeatedly forces us to run mark and sweep gc to avoid generation | |
517 | * number wraparound. | |
518 | */ | |
519 | ||
520 | static unsigned long bucket_sort_key(struct bch_fs *c, struct bch_dev *ca, | |
521 | size_t b, struct bucket_mark m) | |
522 | { | |
523 | unsigned last_io = bucket_last_io(c, bucket(ca, b), READ); | |
524 | unsigned max_last_io = ca->max_last_bucket_io[READ]; | |
525 | ||
526 | /* | |
527 | * Time since last read, scaled to [0, 8) where larger value indicates | |
528 | * more recently read data: | |
529 | */ | |
530 | unsigned long hotness = (max_last_io - last_io) * 7 / max_last_io; | |
531 | ||
532 | /* How much we want to keep the data in this bucket: */ | |
533 | unsigned long data_wantness = | |
534 | (hotness + 1) * bucket_sectors_used(m); | |
535 | ||
536 | unsigned long needs_journal_commit = | |
537 | bucket_needs_journal_commit(m, c->journal.last_seq_ondisk); | |
538 | ||
539 | return (data_wantness << 9) | | |
540 | (needs_journal_commit << 8) | | |
f84306a5 | 541 | (bucket_gc_gen(ca, b) / 16); |
1c6fdbd8 KO |
542 | } |
543 | ||
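A standalone sketch of the composite key built above, with toy inputs, showing that cold, empty, journal-committed buckets sort first (not bcachefs code; toy_sort_key and the sample values are invented):

```c
#include <stdio.h>

static unsigned long toy_sort_key(unsigned last_io, unsigned max_last_io,
				  unsigned sectors_used,
				  unsigned needs_journal_commit,
				  unsigned gc_gen)
{
	/* 0..7: larger means more recently read */
	unsigned long hotness =
		(unsigned long) (max_last_io - last_io) * 7 / max_last_io;
	unsigned long data_wantness = (hotness + 1) * sectors_used;

	return (data_wantness << 9) |
	       ((unsigned long) needs_journal_commit << 8) |
	       (gc_gen / 16);
}

int main(void)
{
	unsigned max_last_io = 1000;

	/* long-unread empty bucket vs. a hot bucket full of cached data */
	printf("cold empty bucket: %lu\n",
	       toy_sort_key(1000, max_last_io, 0, 0, 10));
	printf("hot cached bucket: %lu\n",
	       toy_sort_key(10, max_last_io, 512, 0, 10));
	return 0;
}
```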
544 | static inline int bucket_alloc_cmp(alloc_heap *h, | |
545 | struct alloc_heap_entry l, | |
546 | struct alloc_heap_entry r) | |
547 | { | |
548 | return (l.key > r.key) - (l.key < r.key) ?: | |
549 | (l.nr < r.nr) - (l.nr > r.nr) ?: | |
550 | (l.bucket > r.bucket) - (l.bucket < r.bucket); | |
551 | } | |
552 | ||
b29e197a KO |
553 | static inline int bucket_idx_cmp(const void *_l, const void *_r) |
554 | { | |
555 | const struct alloc_heap_entry *l = _l, *r = _r; | |
556 | ||
557 | return (l->bucket > r->bucket) - (l->bucket < r->bucket); | |
558 | } | |
559 | ||
1c6fdbd8 KO |
560 | static void find_reclaimable_buckets_lru(struct bch_fs *c, struct bch_dev *ca) |
561 | { | |
562 | struct bucket_array *buckets; | |
563 | struct alloc_heap_entry e = { 0 }; | |
b29e197a | 564 | size_t b, i, nr = 0; |
1c6fdbd8 KO |
565 | |
566 | ca->alloc_heap.used = 0; | |
567 | ||
568 | mutex_lock(&c->bucket_clock[READ].lock); | |
569 | down_read(&ca->bucket_lock); | |
570 | ||
571 | buckets = bucket_array(ca); | |
572 | ||
573 | bch2_recalc_oldest_io(c, ca, READ); | |
574 | ||
575 | /* | |
576 | * Find buckets with lowest read priority, by building a maxheap sorted | |
577 | * by read priority and repeatedly replacing the maximum element until | |
578 | * all buckets have been visited. | |
579 | */ | |
580 | for (b = ca->mi.first_bucket; b < ca->mi.nbuckets; b++) { | |
581 | struct bucket_mark m = READ_ONCE(buckets->b[b].mark); | |
582 | unsigned long key = bucket_sort_key(c, ca, b, m); | |
583 | ||
584 | if (!bch2_can_invalidate_bucket(ca, b, m)) | |
585 | continue; | |
586 | ||
587 | if (e.nr && e.bucket + e.nr == b && e.key == key) { | |
588 | e.nr++; | |
589 | } else { | |
590 | if (e.nr) | |
198d6700 KO |
591 | heap_add_or_replace(&ca->alloc_heap, e, |
592 | -bucket_alloc_cmp, NULL); | |
1c6fdbd8 KO |
593 | |
594 | e = (struct alloc_heap_entry) { | |
595 | .bucket = b, | |
596 | .nr = 1, | |
597 | .key = key, | |
598 | }; | |
599 | } | |
600 | ||
601 | cond_resched(); | |
602 | } | |
603 | ||
604 | if (e.nr) | |
198d6700 KO |
605 | heap_add_or_replace(&ca->alloc_heap, e, |
606 | -bucket_alloc_cmp, NULL); | |
1c6fdbd8 | 607 | |
b29e197a KO |
608 | for (i = 0; i < ca->alloc_heap.used; i++) |
609 | nr += ca->alloc_heap.data[i].nr; | |
1c6fdbd8 | 610 | |
b29e197a KO |
611 | while (nr - ca->alloc_heap.data[0].nr >= ALLOC_SCAN_BATCH(ca)) { |
612 | nr -= ca->alloc_heap.data[0].nr; | |
198d6700 | 613 | heap_pop(&ca->alloc_heap, e, -bucket_alloc_cmp, NULL); |
1c6fdbd8 | 614 | } |
b29e197a KO |
615 | |
616 | up_read(&ca->bucket_lock); | |
617 | mutex_unlock(&c->bucket_clock[READ].lock); | |
1c6fdbd8 KO |
618 | } |
619 | ||
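find_reclaimable_buckets_lru() keeps only the best candidates by pushing into a fixed-size heap and replacing the current worst element once the heap is full (heap_add_or_replace() with the negated comparator above). A standalone sketch of that pattern on plain integers, keeping the four smallest keys in an array-based max-heap (invented names, not bcachefs code):

```c
#include <stdio.h>

#define HEAP_SIZE 4

static unsigned heap[HEAP_SIZE];
static unsigned heap_used;

static void sift_down(unsigned i)
{
	for (;;) {
		unsigned l = 2 * i + 1, r = 2 * i + 2, big = i, tmp;

		if (l < heap_used && heap[l] > heap[big])
			big = l;
		if (r < heap_used && heap[r] > heap[big])
			big = r;
		if (big == i)
			break;
		tmp = heap[i]; heap[i] = heap[big]; heap[big] = tmp;
		i = big;
	}
}

static void sift_up(unsigned i)
{
	while (i && heap[(i - 1) / 2] < heap[i]) {
		unsigned p = (i - 1) / 2, tmp;

		tmp = heap[i]; heap[i] = heap[p]; heap[p] = tmp;
		i = p;
	}
}

/* add if there's room, else replace the current worst (largest) key */
static void add_or_replace(unsigned key)
{
	if (heap_used < HEAP_SIZE) {
		heap[heap_used++] = key;
		sift_up(heap_used - 1);
	} else if (key < heap[0]) {
		heap[0] = key;
		sift_down(0);
	}
}

int main(void)
{
	unsigned keys[] = { 50, 3, 99, 12, 7, 42, 1, 88 };
	unsigned i;

	for (i = 0; i < sizeof(keys) / sizeof(keys[0]); i++)
		add_or_replace(keys[i]);

	/* heap now holds the 4 smallest keys, worst candidate at the root */
	for (i = 0; i < heap_used; i++)
		printf("%u ", heap[i]);
	printf("\n");
	return 0;
}
```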
620 | static void find_reclaimable_buckets_fifo(struct bch_fs *c, struct bch_dev *ca) | |
621 | { | |
622 | struct bucket_array *buckets = bucket_array(ca); | |
623 | struct bucket_mark m; | |
b29e197a | 624 | size_t b, start; |
1c6fdbd8 | 625 | |
b29e197a KO |
626 | if (ca->fifo_last_bucket < ca->mi.first_bucket || |
627 | ca->fifo_last_bucket >= ca->mi.nbuckets) | |
628 | ca->fifo_last_bucket = ca->mi.first_bucket; | |
629 | ||
630 | start = ca->fifo_last_bucket; | |
1c6fdbd8 | 631 | |
b29e197a KO |
632 | do { |
633 | ca->fifo_last_bucket++; | |
634 | if (ca->fifo_last_bucket == ca->mi.nbuckets) | |
635 | ca->fifo_last_bucket = ca->mi.first_bucket; | |
1c6fdbd8 | 636 | |
b29e197a | 637 | b = ca->fifo_last_bucket; |
1c6fdbd8 KO |
638 | m = READ_ONCE(buckets->b[b].mark); |
639 | ||
b29e197a KO |
640 | if (bch2_can_invalidate_bucket(ca, b, m)) { |
641 | struct alloc_heap_entry e = { .bucket = b, .nr = 1, }; | |
642 | ||
198d6700 | 643 | heap_add(&ca->alloc_heap, e, bucket_alloc_cmp, NULL); |
b29e197a KO |
644 | if (heap_full(&ca->alloc_heap)) |
645 | break; | |
646 | } | |
1c6fdbd8 KO |
647 | |
648 | cond_resched(); | |
b29e197a | 649 | } while (ca->fifo_last_bucket != start); |
1c6fdbd8 KO |
650 | } |
651 | ||
652 | static void find_reclaimable_buckets_random(struct bch_fs *c, struct bch_dev *ca) | |
653 | { | |
654 | struct bucket_array *buckets = bucket_array(ca); | |
655 | struct bucket_mark m; | |
b29e197a | 656 | size_t checked, i; |
1c6fdbd8 KO |
657 | |
658 | for (checked = 0; | |
b29e197a | 659 | checked < ca->mi.nbuckets / 2; |
1c6fdbd8 KO |
660 | checked++) { |
661 | size_t b = bch2_rand_range(ca->mi.nbuckets - | |
662 | ca->mi.first_bucket) + | |
663 | ca->mi.first_bucket; | |
664 | ||
665 | m = READ_ONCE(buckets->b[b].mark); | |
666 | ||
b29e197a KO |
667 | if (bch2_can_invalidate_bucket(ca, b, m)) { |
668 | struct alloc_heap_entry e = { .bucket = b, .nr = 1, }; | |
669 | ||
198d6700 | 670 | heap_add(&ca->alloc_heap, e, bucket_alloc_cmp, NULL); |
b29e197a KO |
671 | if (heap_full(&ca->alloc_heap)) |
672 | break; | |
673 | } | |
1c6fdbd8 KO |
674 | |
675 | cond_resched(); | |
676 | } | |
b29e197a KO |
677 | |
678 | sort(ca->alloc_heap.data, | |
679 | ca->alloc_heap.used, | |
680 | sizeof(ca->alloc_heap.data[0]), | |
681 | bucket_idx_cmp, NULL); | |
682 | ||
683 | /* remove duplicates: */ | |
684 | for (i = 0; i + 1 < ca->alloc_heap.used; i++) | |
685 | if (ca->alloc_heap.data[i].bucket == | |
686 | ca->alloc_heap.data[i + 1].bucket) | |
687 | ca->alloc_heap.data[i].nr = 0; | |
1c6fdbd8 KO |
688 | } |
689 | ||
b29e197a | 690 | static size_t find_reclaimable_buckets(struct bch_fs *c, struct bch_dev *ca) |
1c6fdbd8 | 691 | { |
b29e197a KO |
692 | size_t i, nr = 0; |
693 | ||
1c6fdbd8 | 694 | ca->inc_gen_needs_gc = 0; |
1c6fdbd8 KO |
695 | |
696 | switch (ca->mi.replacement) { | |
697 | case CACHE_REPLACEMENT_LRU: | |
698 | find_reclaimable_buckets_lru(c, ca); | |
699 | break; | |
700 | case CACHE_REPLACEMENT_FIFO: | |
701 | find_reclaimable_buckets_fifo(c, ca); | |
702 | break; | |
703 | case CACHE_REPLACEMENT_RANDOM: | |
704 | find_reclaimable_buckets_random(c, ca); | |
705 | break; | |
706 | } | |
b29e197a | 707 | |
198d6700 | 708 | heap_resort(&ca->alloc_heap, bucket_alloc_cmp, NULL); |
b29e197a KO |
709 | |
710 | for (i = 0; i < ca->alloc_heap.used; i++) | |
711 | nr += ca->alloc_heap.data[i].nr; | |
712 | ||
713 | return nr; | |
1c6fdbd8 KO |
714 | } |
715 | ||
b29e197a | 716 | static inline long next_alloc_bucket(struct bch_dev *ca) |
1c6fdbd8 | 717 | { |
b29e197a KO |
718 | struct alloc_heap_entry e, *top = ca->alloc_heap.data; |
719 | ||
720 | while (ca->alloc_heap.used) { | |
721 | if (top->nr) { | |
722 | size_t b = top->bucket; | |
723 | ||
724 | top->bucket++; | |
725 | top->nr--; | |
726 | return b; | |
727 | } | |
1c6fdbd8 | 728 | |
198d6700 | 729 | heap_pop(&ca->alloc_heap, e, bucket_alloc_cmp, NULL); |
b29e197a KO |
730 | } |
731 | ||
732 | return -1; | |
1c6fdbd8 KO |
733 | } |
734 | ||
b29e197a KO |
735 | static bool bch2_invalidate_one_bucket(struct bch_fs *c, struct bch_dev *ca, |
736 | size_t bucket, u64 *flush_seq) | |
1c6fdbd8 | 737 | { |
b29e197a | 738 | struct bucket_mark m; |
1c6fdbd8 | 739 | |
b29e197a | 740 | percpu_down_read(&c->usage_lock); |
1c6fdbd8 | 741 | spin_lock(&c->freelist_lock); |
b29e197a KO |
742 | |
743 | bch2_invalidate_bucket(c, ca, bucket, &m); | |
744 | ||
745 | verify_not_on_freelist(c, ca, bucket); | |
746 | BUG_ON(!fifo_push(&ca->free_inc, bucket)); | |
747 | ||
1c6fdbd8 | 748 | spin_unlock(&c->freelist_lock); |
b29e197a KO |
749 | |
750 | bucket_io_clock_reset(c, ca, bucket, READ); | |
751 | bucket_io_clock_reset(c, ca, bucket, WRITE); | |
752 | ||
753 | percpu_up_read(&c->usage_lock); | |
754 | ||
755 | if (m.journal_seq_valid) { | |
756 | u64 journal_seq = atomic64_read(&c->journal.seq); | |
757 | u64 bucket_seq = journal_seq; | |
758 | ||
759 | bucket_seq &= ~((u64) U16_MAX); | |
760 | bucket_seq |= m.journal_seq; | |
761 | ||
762 | if (bucket_seq > journal_seq) | |
763 | bucket_seq -= 1 << 16; | |
764 | ||
765 | *flush_seq = max(*flush_seq, bucket_seq); | |
766 | } | |
767 | ||
768 | return m.cached_sectors != 0; | |
1c6fdbd8 KO |
769 | } |
770 | ||
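The journal_seq handling above reconstructs a full sequence number from the 16 bits stored in the bucket mark, relative to the current journal sequence: splice in the low bits, and if that lands in the future, it must belong to the previous 16-bit window. A standalone sketch of that reconstruction (expand_seq is an invented name; assumes the stored value is at most 2^16 behind, not bcachefs code):

```c
#include <stdint.h>
#include <stdio.h>

static uint64_t expand_seq(uint64_t current_seq, uint16_t low16)
{
	uint64_t seq = (current_seq & ~(uint64_t) UINT16_MAX) | low16;

	/* splicing the low bits produced a value in the future: it really
	 * belongs to the previous 16-bit window */
	if (seq > current_seq)
		seq -= 1 << 16;

	return seq;
}

int main(void)
{
	printf("%#llx\n", (unsigned long long) expand_seq(0x12345, 0x2340)); /* 0x12340 */
	printf("%#llx\n", (unsigned long long) expand_seq(0x12345, 0xfff0)); /* 0xfff0 */
	return 0;
}
```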
b29e197a KO |
771 | /* |
772 | * Pull buckets off ca->alloc_heap, invalidate them, move them to ca->free_inc: | |
773 | */ | |
774 | static int bch2_invalidate_buckets(struct bch_fs *c, struct bch_dev *ca) | |
1c6fdbd8 KO |
775 | { |
776 | struct btree_iter iter; | |
b29e197a | 777 | u64 journal_seq = 0; |
1c6fdbd8 | 778 | int ret = 0; |
b29e197a | 779 | long b; |
1c6fdbd8 KO |
780 | |
781 | bch2_btree_iter_init(&iter, c, BTREE_ID_ALLOC, POS(ca->dev_idx, 0), | |
782 | BTREE_ITER_SLOTS|BTREE_ITER_INTENT); | |
783 | ||
784 | /* Only use nowait if we've already invalidated at least one bucket: */ | |
b29e197a KO |
785 | while (!ret && |
786 | !fifo_full(&ca->free_inc) && | |
787 | (b = next_alloc_bucket(ca)) >= 0) { | |
788 | bool must_flush = | |
789 | bch2_invalidate_one_bucket(c, ca, b, &journal_seq); | |
790 | ||
791 | ret = __bch2_alloc_write_key(c, ca, b, &iter, | |
792 | must_flush ? &journal_seq : NULL, | |
793 | !fifo_empty(&ca->free_inc) ? BTREE_INSERT_NOWAIT : 0); | |
1c6fdbd8 KO |
794 | } |
795 | ||
796 | bch2_btree_iter_unlock(&iter); | |
797 | ||
798 | /* If we used NOWAIT, don't return the error: */ | |
b29e197a KO |
799 | if (!fifo_empty(&ca->free_inc)) |
800 | ret = 0; | |
801 | if (ret) { | |
802 | bch_err(ca, "error invalidating buckets: %i", ret); | |
803 | return ret; | |
804 | } | |
1c6fdbd8 | 805 | |
b29e197a KO |
806 | if (journal_seq) |
807 | ret = bch2_journal_flush_seq(&c->journal, journal_seq); | |
808 | if (ret) { | |
809 | bch_err(ca, "journal error: %i", ret); | |
810 | return ret; | |
811 | } | |
1c6fdbd8 | 812 | |
b29e197a | 813 | return 0; |
1c6fdbd8 KO |
814 | } |
815 | ||
816 | static int push_invalidated_bucket(struct bch_fs *c, struct bch_dev *ca, size_t bucket) | |
817 | { | |
b29e197a | 818 | unsigned i; |
1c6fdbd8 KO |
819 | int ret = 0; |
820 | ||
821 | while (1) { | |
822 | set_current_state(TASK_INTERRUPTIBLE); | |
823 | ||
b29e197a KO |
824 | spin_lock(&c->freelist_lock); |
825 | for (i = 0; i < RESERVE_NR; i++) | |
826 | if (fifo_push(&ca->free[i], bucket)) { | |
827 | fifo_pop(&ca->free_inc, bucket); | |
828 | closure_wake_up(&c->freelist_wait); | |
829 | spin_unlock(&c->freelist_lock); | |
830 | goto out; | |
831 | } | |
832 | spin_unlock(&c->freelist_lock); | |
1c6fdbd8 KO |
833 | |
834 | if ((current->flags & PF_KTHREAD) && | |
835 | kthread_should_stop()) { | |
836 | ret = 1; | |
837 | break; | |
838 | } | |
839 | ||
840 | schedule(); | |
841 | try_to_freeze(); | |
842 | } | |
b29e197a | 843 | out: |
1c6fdbd8 KO |
844 | __set_current_state(TASK_RUNNING); |
845 | return ret; | |
846 | } | |
847 | ||
848 | /* | |
b29e197a KO |
849 | * Pulls buckets off free_inc, discards them (if enabled), then adds them to |
850 | * freelists, waiting until there's room if necessary: | |
1c6fdbd8 KO |
851 | */ |
852 | static int discard_invalidated_buckets(struct bch_fs *c, struct bch_dev *ca) | |
853 | { | |
b29e197a | 854 | while (!fifo_empty(&ca->free_inc)) { |
1c6fdbd8 KO |
855 | size_t bucket = fifo_peek(&ca->free_inc); |
856 | ||
1c6fdbd8 KO |
857 | if (ca->mi.discard && |
858 | bdev_max_discard_sectors(ca->disk_sb.bdev)) | |
859 | blkdev_issue_discard(ca->disk_sb.bdev, | |
860 | bucket_to_sector(ca, bucket), | |
861 | ca->mi.bucket_size, GFP_NOIO); | |
862 | ||
863 | if (push_invalidated_bucket(c, ca, bucket)) | |
864 | return 1; | |
865 | } | |
866 | ||
867 | return 0; | |
868 | } | |
869 | ||
870 | /** | |
871 | * bch2_allocator_thread - move buckets from free_inc to reserves |
872 | * |
873 | * The free_inc FIFO is filled with buckets that find_reclaimable_buckets() |
874 | * selects and bch2_invalidate_buckets() invalidates; the reserves are |
875 | * depleted by bucket allocation. When we run out of free_inc, scan for more |
876 | * buckets to invalidate and write out the new bucket gens. |
877 | */ | |
878 | static int bch2_allocator_thread(void *arg) | |
879 | { | |
880 | struct bch_dev *ca = arg; | |
881 | struct bch_fs *c = ca->fs; | |
b29e197a | 882 | size_t nr; |
1c6fdbd8 KO |
883 | int ret; |
884 | ||
885 | set_freezable(); | |
886 | ||
887 | while (1) { | |
b29e197a | 888 | cond_resched(); |
1c6fdbd8 | 889 | |
b29e197a KO |
890 | pr_debug("discarding %zu invalidated buckets", |
891 | fifo_used(&ca->free_inc)); | |
1c6fdbd8 | 892 | |
b29e197a KO |
893 | ret = discard_invalidated_buckets(c, ca); |
894 | if (ret) | |
895 | goto stop; | |
1c6fdbd8 | 896 | |
94c1f4ad KO |
897 | down_read(&c->gc_lock); |
898 | ||
b29e197a | 899 | ret = bch2_invalidate_buckets(c, ca); |
94c1f4ad KO |
900 | if (ret) { |
901 | up_read(&c->gc_lock); | |
b29e197a | 902 | goto stop; |
94c1f4ad | 903 | } |
1c6fdbd8 | 904 | |
94c1f4ad KO |
905 | if (!fifo_empty(&ca->free_inc)) { |
906 | up_read(&c->gc_lock); | |
b29e197a | 907 | continue; |
94c1f4ad | 908 | } |
1c6fdbd8 KO |
909 | |
910 | pr_debug("free_inc now empty"); | |
911 | ||
b29e197a | 912 | do { |
1c6fdbd8 KO |
913 | if (test_bit(BCH_FS_GC_FAILURE, &c->flags)) { |
914 | up_read(&c->gc_lock); | |
915 | bch_err(ca, "gc failure"); | |
916 | goto stop; | |
917 | } | |
918 | ||
919 | /* | |
920 | * Find some buckets that we can invalidate, either | |
921 | * they're completely unused, or only contain clean data | |
922 | * that's been written back to the backing device or | |
923 | * another cache tier | |
924 | */ | |
925 | ||
926 | pr_debug("scanning for reclaimable buckets"); | |
927 | ||
b29e197a | 928 | nr = find_reclaimable_buckets(c, ca); |
1c6fdbd8 | 929 | |
b29e197a | 930 | pr_debug("found %zu buckets", nr); |
1c6fdbd8 | 931 | |
b29e197a | 932 | trace_alloc_batch(ca, nr, ca->alloc_heap.size); |
1c6fdbd8 | 933 | |
b29e197a KO |
934 | if ((ca->inc_gen_needs_gc >= ALLOC_SCAN_BATCH(ca) || |
935 | ca->inc_gen_really_needs_gc) && | |
1c6fdbd8 KO |
936 | c->gc_thread) { |
937 | atomic_inc(&c->kick_gc); | |
938 | wake_up_process(c->gc_thread); | |
939 | } | |
940 | ||
1c6fdbd8 | 941 | /* |
b29e197a KO |
942 | * If we found any buckets, we have to invalidate them |
943 | * before we scan for more - but if we didn't find very | |
944 | * many we may want to wait on more buckets being | |
945 | * available so we don't spin: | |
1c6fdbd8 | 946 | */ |
b29e197a KO |
947 | if (!nr || |
948 | (nr < ALLOC_SCAN_BATCH(ca) && | |
949 | !fifo_full(&ca->free[RESERVE_MOVINGGC]))) { | |
950 | ca->allocator_blocked = true; | |
951 | closure_wake_up(&c->freelist_wait); | |
952 | ||
953 | ret = wait_buckets_available(c, ca); | |
954 | if (ret) { | |
955 | up_read(&c->gc_lock); | |
956 | goto stop; | |
957 | } | |
1c6fdbd8 | 958 | } |
b29e197a | 959 | } while (!nr); |
1c6fdbd8 KO |
960 | |
961 | ca->allocator_blocked = false; | |
962 | up_read(&c->gc_lock); | |
963 | ||
b29e197a | 964 | pr_debug("%zu buckets to invalidate", nr); |
1c6fdbd8 KO |
965 | |
966 | /* | |
b29e197a | 967 | * alloc_heap is now full of buckets to invalidate: next, invalidate |
1c6fdbd8 KO |
968 | * them and write out the new bucket gens: |
969 | */ | |
970 | } | |
971 | ||
972 | stop: | |
973 | pr_debug("alloc thread stopping (ret %i)", ret); | |
974 | return 0; | |
975 | } | |
976 | ||
1c6fdbd8 KO |
977 | /* Startup/shutdown (ro/rw): */ |
978 | ||
979 | void bch2_recalc_capacity(struct bch_fs *c) | |
980 | { | |
981 | struct bch_dev *ca; | |
a50ed7c8 | 982 | u64 capacity = 0, reserved_sectors = 0, gc_reserve; |
b092dadd | 983 | unsigned bucket_size_max = 0; |
1c6fdbd8 KO |
984 | unsigned long ra_pages = 0; |
985 | unsigned i, j; | |
986 | ||
987 | lockdep_assert_held(&c->state_lock); | |
988 | ||
989 | for_each_online_member(ca, c, i) { | |
990 | struct backing_dev_info *bdi = ca->disk_sb.bdev->bd_disk->bdi; | |
991 | ||
992 | ra_pages += bdi->ra_pages; | |
993 | } | |
994 | ||
995 | bch2_set_ra_pages(c, ra_pages); | |
996 | ||
997 | for_each_rw_member(ca, c, i) { | |
a50ed7c8 | 998 | u64 dev_reserve = 0; |
1c6fdbd8 KO |
999 | |
1000 | /* | |
1001 | * We need to reserve buckets (from the number | |
1002 | * of currently available buckets) against | |
1003 | * foreground writes, mainly so that copygc can |
1004 | * make forward progress. | |
1005 | * | |
1006 | * We need enough to refill the various reserves | |
1007 | * from scratch - copygc will use its entire | |
1008 | * reserve all at once, then run again when |
1009 | * its reserve is refilled (from the formerly | |
1010 | * available buckets). | |
1011 | * | |
1012 | * This reserve is just used when considering if | |
1013 | * allocations for foreground writes must wait - | |
1014 | * not -ENOSPC calculations. | |
1015 | */ | |
1016 | for (j = 0; j < RESERVE_NONE; j++) | |
a9bec520 | 1017 | dev_reserve += ca->free[j].size; |
1c6fdbd8 | 1018 | |
a9bec520 KO |
1019 | dev_reserve += 1; /* btree write point */ |
1020 | dev_reserve += 1; /* copygc write point */ | |
1021 | dev_reserve += 1; /* rebalance write point */ | |
1c6fdbd8 | 1022 | |
a9bec520 | 1023 | dev_reserve *= ca->mi.bucket_size; |
1c6fdbd8 | 1024 | |
a50ed7c8 | 1025 | ca->copygc_threshold = dev_reserve; |
a9bec520 | 1026 | |
a50ed7c8 KO |
1027 | capacity += bucket_to_sector(ca, ca->mi.nbuckets - |
1028 | ca->mi.first_bucket); | |
1c6fdbd8 | 1029 | |
a50ed7c8 | 1030 | reserved_sectors += dev_reserve * 2; |
b092dadd KO |
1031 | |
1032 | bucket_size_max = max_t(unsigned, bucket_size_max, | |
1033 | ca->mi.bucket_size); | |
a9bec520 | 1034 | } |
1c6fdbd8 | 1035 | |
a50ed7c8 KO |
1036 | gc_reserve = c->opts.gc_reserve_bytes |
1037 | ? c->opts.gc_reserve_bytes >> 9 | |
1038 | : div64_u64(capacity * c->opts.gc_reserve_percent, 100); | |
1039 | ||
1040 | reserved_sectors = max(gc_reserve, reserved_sectors); | |
1c6fdbd8 | 1041 | |
a50ed7c8 | 1042 | reserved_sectors = min(reserved_sectors, capacity); |
1c6fdbd8 | 1043 | |
a9bec520 | 1044 | c->capacity = capacity - reserved_sectors; |
1c6fdbd8 | 1045 | |
b092dadd KO |
1046 | c->bucket_size_max = bucket_size_max; |
1047 | ||
1c6fdbd8 KO |
1048 | if (c->capacity) { |
1049 | bch2_io_timer_add(&c->io_clock[READ], | |
1050 | &c->bucket_clock[READ].rescale); | |
1051 | bch2_io_timer_add(&c->io_clock[WRITE], | |
1052 | &c->bucket_clock[WRITE].rescale); | |
1053 | } else { | |
1054 | bch2_io_timer_del(&c->io_clock[READ], | |
1055 | &c->bucket_clock[READ].rescale); | |
1056 | bch2_io_timer_del(&c->io_clock[WRITE], | |
1057 | &c->bucket_clock[WRITE].rescale); | |
1058 | } | |
1059 | ||
1060 | /* Wake up in case someone was waiting for buckets */ |
1061 | closure_wake_up(&c->freelist_wait); | |
1062 | } | |
1063 | ||
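A standalone sketch of the reserve arithmetic bch2_recalc_capacity() performs for a single hypothetical device, in 512-byte sectors (not bcachefs code; the fifo sizes and option values are made up):

```c
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t bucket_size		= 1024;		/* 512 KiB buckets */
	uint64_t nbuckets		= 1 << 20;	/* ~512 GiB device */
	uint64_t first_bucket		= 16;
	uint64_t reserve_fifo_buckets	= 128;		/* sum of free[0..RESERVE_NONE) sizes */
	uint64_t gc_reserve_percent	= 8;

	/* per-device reserve: fifos plus btree/copygc/rebalance write points */
	uint64_t dev_reserve = (reserve_fifo_buckets + 3) * bucket_size;

	uint64_t capacity = (nbuckets - first_bucket) * bucket_size;
	uint64_t reserved = dev_reserve * 2;
	uint64_t gc_reserve = capacity * gc_reserve_percent / 100;

	/* keep the larger of the gc reserve and the write-point reserves,
	 * but never reserve more than the device actually has */
	if (gc_reserve > reserved)
		reserved = gc_reserve;
	if (reserved > capacity)
		reserved = capacity;

	printf("usable capacity %llu sectors\n",
	       (unsigned long long) (capacity - reserved));
	printf("reserved        %llu sectors\n",
	       (unsigned long long) reserved);
	return 0;
}
```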
1c6fdbd8 KO |
1064 | static bool bch2_dev_has_open_write_point(struct bch_fs *c, struct bch_dev *ca) |
1065 | { | |
1066 | struct open_bucket *ob; | |
1067 | bool ret = false; | |
1068 | ||
1069 | for (ob = c->open_buckets; | |
1070 | ob < c->open_buckets + ARRAY_SIZE(c->open_buckets); | |
1071 | ob++) { | |
1072 | spin_lock(&ob->lock); | |
1073 | if (ob->valid && !ob->on_partial_list && | |
1074 | ob->ptr.dev == ca->dev_idx) | |
1075 | ret = true; | |
1076 | spin_unlock(&ob->lock); | |
1077 | } | |
1078 | ||
1079 | return ret; | |
1080 | } | |
1081 | ||
1082 | /* device goes ro: */ | |
1083 | void bch2_dev_allocator_remove(struct bch_fs *c, struct bch_dev *ca) | |
1084 | { | |
1085 | unsigned i; | |
1086 | ||
1087 | BUG_ON(ca->alloc_thread); | |
1088 | ||
1089 | /* First, remove device from allocation groups: */ | |
1090 | ||
1091 | for (i = 0; i < ARRAY_SIZE(c->rw_devs); i++) | |
1092 | clear_bit(ca->dev_idx, c->rw_devs[i].d); | |
1093 | ||
1094 | /* | |
1095 | * Capacity is calculated based off of devices in allocation groups: | |
1096 | */ | |
1097 | bch2_recalc_capacity(c); | |
1098 | ||
1099 | /* Next, close write points that point to this device... */ | |
1100 | for (i = 0; i < ARRAY_SIZE(c->write_points); i++) | |
7b3f84ea | 1101 | bch2_writepoint_stop(c, ca, &c->write_points[i]); |
1c6fdbd8 | 1102 | |
7b3f84ea KO |
1103 | bch2_writepoint_stop(c, ca, &ca->copygc_write_point); |
1104 | bch2_writepoint_stop(c, ca, &c->rebalance_write_point); | |
1105 | bch2_writepoint_stop(c, ca, &c->btree_write_point); | |
1c6fdbd8 KO |
1106 | |
1107 | mutex_lock(&c->btree_reserve_cache_lock); | |
1108 | while (c->btree_reserve_cache_nr) { | |
1109 | struct btree_alloc *a = | |
1110 | &c->btree_reserve_cache[--c->btree_reserve_cache_nr]; | |
1111 | ||
ef337c54 | 1112 | bch2_open_buckets_put(c, &a->ob); |
1c6fdbd8 KO |
1113 | } |
1114 | mutex_unlock(&c->btree_reserve_cache_lock); | |
1115 | ||
1116 | /* | |
1117 | * Wake up threads that were blocked on allocation, so they can notice | |
1118 | * the device can no longer be removed and the capacity has changed: | |
1119 | */ | |
1120 | closure_wake_up(&c->freelist_wait); | |
1121 | ||
1122 | /* | |
1123 | * journal_res_get() can block waiting for free space in the journal - | |
1124 | * it needs to notice there may not be devices to allocate from anymore: | |
1125 | */ | |
1126 | wake_up(&c->journal.wait); | |
1127 | ||
1128 | /* Now wait for any in flight writes: */ | |
1129 | ||
1130 | closure_wait_event(&c->open_buckets_wait, | |
1131 | !bch2_dev_has_open_write_point(c, ca)); | |
1132 | } | |
1133 | ||
1134 | /* device goes rw: */ | |
1135 | void bch2_dev_allocator_add(struct bch_fs *c, struct bch_dev *ca) | |
1136 | { | |
1137 | unsigned i; | |
1138 | ||
1139 | for (i = 0; i < ARRAY_SIZE(c->rw_devs); i++) | |
1140 | if (ca->mi.data_allowed & (1 << i)) | |
1141 | set_bit(ca->dev_idx, c->rw_devs[i].d); | |
1142 | } | |
1143 | ||
1144 | /* stop allocator thread: */ | |
1145 | void bch2_dev_allocator_stop(struct bch_dev *ca) | |
1146 | { | |
1147 | struct task_struct *p; | |
1148 | ||
1149 | p = rcu_dereference_protected(ca->alloc_thread, 1); | |
1150 | ca->alloc_thread = NULL; | |
1151 | ||
1152 | /* | |
1153 | * We need an rcu barrier between setting ca->alloc_thread = NULL and | |
1154 | * the thread shutting down to avoid bch2_wake_allocator() racing: | |
1155 | * | |
1156 | * XXX: it would be better to have the rcu barrier be asynchronous | |
1157 | * instead of blocking us here | |
1158 | */ | |
1159 | synchronize_rcu(); | |
1160 | ||
1161 | if (p) { | |
1162 | kthread_stop(p); | |
1163 | put_task_struct(p); | |
1164 | } | |
1165 | } | |
1166 | ||
1167 | /* start allocator thread: */ | |
1168 | int bch2_dev_allocator_start(struct bch_dev *ca) | |
1169 | { | |
1170 | struct task_struct *p; | |
1171 | ||
1172 | /* | |
1173 | * allocator thread already started? | |
1174 | */ | |
1175 | if (ca->alloc_thread) | |
1176 | return 0; | |
1177 | ||
1178 | p = kthread_create(bch2_allocator_thread, ca, | |
1179 | "bch_alloc[%s]", ca->name); | |
1180 | if (IS_ERR(p)) | |
1181 | return PTR_ERR(p); | |
1182 | ||
1183 | get_task_struct(p); | |
1184 | rcu_assign_pointer(ca->alloc_thread, p); | |
1185 | wake_up_process(p); | |
1186 | return 0; | |
1187 | } | |
1188 | ||
b29e197a KO |
1189 | static void flush_held_btree_writes(struct bch_fs *c) |
1190 | { | |
1191 | struct bucket_table *tbl; | |
1192 | struct rhash_head *pos; | |
1193 | struct btree *b; | |
1194 | bool flush_updates; | |
1195 | size_t i, nr_pending_updates; | |
1196 | ||
1197 | clear_bit(BCH_FS_HOLD_BTREE_WRITES, &c->flags); | |
1198 | again: | |
1199 | pr_debug("flushing dirty btree nodes"); | |
1200 | cond_resched(); | |
1201 | ||
1202 | flush_updates = false; | |
1203 | nr_pending_updates = bch2_btree_interior_updates_nr_pending(c); | |
1204 | ||
1205 | rcu_read_lock(); | |
1206 | for_each_cached_btree(b, c, tbl, i, pos) | |
1207 | if (btree_node_dirty(b) && (!b->written || b->level)) { | |
1208 | if (btree_node_may_write(b)) { | |
1209 | rcu_read_unlock(); | |
1210 | btree_node_lock_type(c, b, SIX_LOCK_read); | |
1211 | bch2_btree_node_write(c, b, SIX_LOCK_read); | |
1212 | six_unlock_read(&b->lock); | |
1213 | goto again; | |
1214 | } else { | |
1215 | flush_updates = true; | |
1216 | } | |
1217 | } | |
1218 | rcu_read_unlock(); | |
1219 | ||
1220 | if (c->btree_roots_dirty) | |
1221 | bch2_journal_meta(&c->journal); | |
1222 | ||
1223 | /* | |
1224 | * This is ugly, but it's needed to flush btree node writes | |
1225 | * without spinning... | |
1226 | */ | |
1227 | if (flush_updates) { | |
1228 | closure_wait_event(&c->btree_interior_update_wait, | |
1229 | bch2_btree_interior_updates_nr_pending(c) < | |
1230 | nr_pending_updates); | |
1231 | goto again; | |
1232 | } | |
1233 | ||
1234 | } | |
1235 | ||
1c6fdbd8 KO |
1236 | static void allocator_start_issue_discards(struct bch_fs *c) |
1237 | { | |
1238 | struct bch_dev *ca; | |
1239 | unsigned dev_iter; | |
b29e197a | 1240 | size_t bu; |
1c6fdbd8 | 1241 | |
b29e197a KO |
1242 | for_each_rw_member(ca, c, dev_iter) |
1243 | while (fifo_pop(&ca->free_inc, bu)) | |
1c6fdbd8 KO |
1244 | blkdev_issue_discard(ca->disk_sb.bdev, |
1245 | bucket_to_sector(ca, bu), | |
1246 | ca->mi.bucket_size, GFP_NOIO); | |
1c6fdbd8 KO |
1247 | } |
1248 | ||
1249 | static int __bch2_fs_allocator_start(struct bch_fs *c) | |
1250 | { | |
1251 | struct bch_dev *ca; | |
1c6fdbd8 KO |
1252 | unsigned dev_iter; |
1253 | u64 journal_seq = 0; | |
b29e197a | 1254 | long bu; |
1c6fdbd8 KO |
1255 | bool invalidating_data = false; |
1256 | int ret = 0; | |
1257 | ||
1258 | if (test_bit(BCH_FS_GC_FAILURE, &c->flags)) | |
1259 | return -1; | |
1260 | ||
b29e197a KO |
1261 | if (test_alloc_startup(c)) { |
1262 | invalidating_data = true; | |
1263 | goto not_enough; | |
1264 | } | |
1265 | ||
1c6fdbd8 KO |
1266 | /* Scan for buckets that are already invalidated: */ |
1267 | for_each_rw_member(ca, c, dev_iter) { | |
1268 | struct btree_iter iter; | |
1269 | struct bucket_mark m; | |
1270 | struct bkey_s_c k; | |
1271 | ||
1272 | for_each_btree_key(&iter, c, BTREE_ID_ALLOC, POS(ca->dev_idx, 0), 0, k) { | |
1273 | if (k.k->type != BCH_ALLOC) | |
1274 | continue; | |
1275 | ||
1276 | bu = k.k->p.offset; | |
1277 | m = READ_ONCE(bucket(ca, bu)->mark); | |
1278 | ||
1279 | if (!is_available_bucket(m) || m.cached_sectors) | |
1280 | continue; | |
1281 | ||
1282 | percpu_down_read(&c->usage_lock); | |
1283 | bch2_mark_alloc_bucket(c, ca, bu, true, | |
1284 | gc_pos_alloc(c, NULL), | |
1285 | BCH_BUCKET_MARK_MAY_MAKE_UNAVAILABLE| | |
1286 | BCH_BUCKET_MARK_GC_LOCK_HELD); | |
1287 | percpu_up_read(&c->usage_lock); | |
1288 | ||
1289 | fifo_push(&ca->free_inc, bu); | |
1c6fdbd8 KO |
1290 | |
1291 | if (fifo_full(&ca->free_inc)) | |
1292 | break; | |
1293 | } | |
1294 | bch2_btree_iter_unlock(&iter); | |
1295 | } | |
1296 | ||
1297 | /* did we find enough buckets? */ | |
1298 | for_each_rw_member(ca, c, dev_iter) | |
1299 | if (fifo_used(&ca->free_inc) < ca->free[RESERVE_BTREE].size) { | |
1300 | percpu_ref_put(&ca->io_ref); | |
1301 | goto not_enough; | |
1302 | } | |
1303 | ||
1304 | return 0; | |
1305 | not_enough: | |
1306 | pr_debug("did not find enough empty buckets; issuing discards"); | |
1307 | ||
b29e197a | 1308 | /* clear out free_inc, we'll be using it again below: */ |
1c6fdbd8 KO |
1309 | for_each_rw_member(ca, c, dev_iter) |
1310 | discard_invalidated_buckets(c, ca); | |
1311 | ||
1312 | pr_debug("scanning for reclaimable buckets"); | |
1313 | ||
1314 | for_each_rw_member(ca, c, dev_iter) { | |
1c6fdbd8 | 1315 | find_reclaimable_buckets(c, ca); |
1c6fdbd8 | 1316 | |
b29e197a KO |
1317 | while (!fifo_full(&ca->free[RESERVE_BTREE]) && |
1318 | (bu = next_alloc_bucket(ca)) >= 0) { | |
1319 | invalidating_data |= | |
1320 | bch2_invalidate_one_bucket(c, ca, bu, &journal_seq); | |
1c6fdbd8 | 1321 | |
b29e197a KO |
1322 | fifo_push(&ca->free[RESERVE_BTREE], bu); |
1323 | set_bit(bu, ca->buckets_dirty); | |
1324 | } | |
1c6fdbd8 KO |
1325 | } |
1326 | ||
1327 | pr_debug("done scanning for reclaimable buckets"); | |
1328 | ||
1329 | /* | |
1330 | * We're moving buckets to freelists _before_ they've been marked as | |
1331 | * invalidated on disk - we have to so that we can allocate new btree | |
1332 | * nodes to mark them as invalidated on disk. | |
1333 | * | |
1334 | * However, we can't _write_ to any of these buckets yet - they might | |
1335 | * have cached data in them, which is live until they're marked as | |
1336 | * invalidated on disk: | |
1337 | */ | |
1338 | if (invalidating_data) { | |
1339 | pr_debug("invalidating existing data"); | |
1340 | set_bit(BCH_FS_HOLD_BTREE_WRITES, &c->flags); | |
1341 | } else { | |
1342 | pr_debug("issuing discards"); | |
1343 | allocator_start_issue_discards(c); | |
1344 | } | |
1345 | ||
1346 | /* | |
1347 | * XXX: it's possible for this to deadlock waiting on journal reclaim, | |
1348 | * since we're holding btree writes. What then? | |
1349 | */ | |
b29e197a KO |
1350 | ret = bch2_alloc_write(c); |
1351 | if (ret) | |
1352 | return ret; | |
1c6fdbd8 KO |
1353 | |
1354 | if (invalidating_data) { | |
1355 | pr_debug("flushing journal"); | |
1356 | ||
1357 | ret = bch2_journal_flush_seq(&c->journal, journal_seq); | |
1358 | if (ret) | |
1359 | return ret; | |
1360 | ||
1361 | pr_debug("issuing discards"); | |
1362 | allocator_start_issue_discards(c); | |
1363 | } | |
1364 | ||
1c6fdbd8 KO |
1365 | set_bit(BCH_FS_ALLOCATOR_STARTED, &c->flags); |
1366 | ||
1367 | /* now flush dirty btree nodes: */ | |
b29e197a KO |
1368 | if (invalidating_data) |
1369 | flush_held_btree_writes(c); | |
1c6fdbd8 KO |
1370 | |
1371 | return 0; | |
1372 | } | |
1373 | ||
1374 | int bch2_fs_allocator_start(struct bch_fs *c) | |
1375 | { | |
1376 | struct bch_dev *ca; | |
1377 | unsigned i; | |
1378 | int ret; | |
1379 | ||
1380 | down_read(&c->gc_lock); | |
1381 | ret = __bch2_fs_allocator_start(c); | |
1382 | up_read(&c->gc_lock); | |
1383 | ||
1384 | if (ret) | |
1385 | return ret; | |
1386 | ||
1387 | for_each_rw_member(ca, c, i) { | |
1388 | ret = bch2_dev_allocator_start(ca); | |
1389 | if (ret) { | |
1390 | percpu_ref_put(&ca->io_ref); | |
1391 | return ret; | |
1392 | } | |
1393 | } | |
1394 | ||
1395 | return bch2_alloc_write(c); | |
1396 | } | |
1397 | ||
b092dadd | 1398 | void bch2_fs_allocator_background_init(struct bch_fs *c) |
1c6fdbd8 | 1399 | { |
1c6fdbd8 KO |
1400 | spin_lock_init(&c->freelist_lock); |
1401 | bch2_bucket_clock_init(c, READ); | |
1402 | bch2_bucket_clock_init(c, WRITE); | |
1403 | ||
1c6fdbd8 KO |
1404 | c->pd_controllers_update_seconds = 5; |
1405 | INIT_DELAYED_WORK(&c->pd_controllers_update, pd_controllers_update); | |
1406 | } |