bcachefs: Fix a deadlock
fs/bcachefs/alloc_background.c

// SPDX-License-Identifier: GPL-2.0
#include "bcachefs.h"
#include "alloc_background.h"
#include "alloc_foreground.h"
#include "btree_cache.h"
#include "btree_io.h"
#include "btree_update.h"
#include "btree_update_interior.h"
#include "btree_gc.h"
#include "buckets.h"
#include "clock.h"
#include "debug.h"
#include "ec.h"
#include "error.h"
#include "journal_io.h"
#include "trace.h"

#include <linux/kthread.h>
#include <linux/math64.h>
#include <linux/random.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>
#include <linux/sched/task.h>
#include <linux/sort.h>

static const char * const bch2_alloc_field_names[] = {
#define x(name, bytes) #name,
	BCH_ALLOC_FIELDS()
#undef x
	NULL
};

static void bch2_recalc_oldest_io(struct bch_fs *, struct bch_dev *, int);

/* Ratelimiting/PD controllers */

static void pd_controllers_update(struct work_struct *work)
{
	struct bch_fs *c = container_of(to_delayed_work(work),
					struct bch_fs,
					pd_controllers_update);
	struct bch_dev *ca;
	unsigned i;

	for_each_member_device(ca, c, i) {
		struct bch_dev_usage stats = bch2_dev_usage_read(c, ca);

		u64 free = bucket_to_sector(ca,
				__dev_buckets_free(ca, stats)) << 9;
		/*
		 * Bytes of internal fragmentation, which can be
		 * reclaimed by copy GC
		 */
		s64 fragmented = (bucket_to_sector(ca,
					stats.buckets[BCH_DATA_USER] +
					stats.buckets[BCH_DATA_CACHED]) -
				  (stats.sectors[BCH_DATA_USER] +
				   stats.sectors[BCH_DATA_CACHED])) << 9;

		fragmented = max(0LL, fragmented);

		bch2_pd_controller_update(&ca->copygc_pd,
					  free, fragmented, -1);
	}

	schedule_delayed_work(&c->pd_controllers_update,
			      c->pd_controllers_update_seconds * HZ);
}

/* Persistent alloc info: */

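/*
 * Alloc keys store their per-bucket fields in a variable-length encoding:
 * only nonzero fields are present, each using the width given by
 * BCH_ALLOC_FIELD_BYTES, with a->fields as the bitmap of which fields are
 * encoded. get_alloc_field()/put_alloc_field() walk that encoding one field
 * at a time.
 */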
static inline u64 get_alloc_field(const struct bch_alloc *a,
				  const void **p, unsigned field)
{
	unsigned bytes = BCH_ALLOC_FIELD_BYTES[field];
	u64 v;

	if (!(a->fields & (1 << field)))
		return 0;

	switch (bytes) {
	case 1:
		v = *((const u8 *) *p);
		break;
	case 2:
		v = le16_to_cpup(*p);
		break;
	case 4:
		v = le32_to_cpup(*p);
		break;
	case 8:
		v = le64_to_cpup(*p);
		break;
	default:
		BUG();
	}

	*p += bytes;
	return v;
}

static inline void put_alloc_field(struct bkey_i_alloc *a, void **p,
				   unsigned field, u64 v)
{
	unsigned bytes = BCH_ALLOC_FIELD_BYTES[field];

	if (!v)
		return;

	a->v.fields |= 1 << field;

	switch (bytes) {
	case 1:
		*((u8 *) *p) = v;
		break;
	case 2:
		*((__le16 *) *p) = cpu_to_le16(v);
		break;
	case 4:
		*((__le32 *) *p) = cpu_to_le32(v);
		break;
	case 8:
		*((__le64 *) *p) = cpu_to_le64(v);
		break;
	default:
		BUG();
	}

	*p += bytes;
}

struct bkey_alloc_unpacked bch2_alloc_unpack(const struct bch_alloc *a)
{
	struct bkey_alloc_unpacked ret = { .gen = a->gen };
	const void *d = a->data;
	unsigned idx = 0;

#define x(_name, _bits)	ret._name = get_alloc_field(a, &d, idx++);
	BCH_ALLOC_FIELDS()
#undef x
	return ret;
}

static void bch2_alloc_pack(struct bkey_i_alloc *dst,
			    const struct bkey_alloc_unpacked src)
{
	unsigned idx = 0;
	void *d = dst->v.data;

	dst->v.fields	= 0;
	dst->v.gen	= src.gen;

#define x(_name, _bits)	put_alloc_field(dst, &d, idx++, src._name);
	BCH_ALLOC_FIELDS()
#undef x

	set_bkey_val_bytes(&dst->k, (void *) d - (void *) &dst->v);
}

static unsigned bch_alloc_val_u64s(const struct bch_alloc *a)
{
	unsigned i, bytes = offsetof(struct bch_alloc, data);

	for (i = 0; i < ARRAY_SIZE(BCH_ALLOC_FIELD_BYTES); i++)
		if (a->fields & (1 << i))
			bytes += BCH_ALLOC_FIELD_BYTES[i];

	return DIV_ROUND_UP(bytes, sizeof(u64));
}

const char *bch2_alloc_invalid(const struct bch_fs *c, struct bkey_s_c k)
{
	struct bkey_s_c_alloc a = bkey_s_c_to_alloc(k);

	if (k.k->p.inode >= c->sb.nr_devices ||
	    !c->devs[k.k->p.inode])
		return "invalid device";

	/* allow for unknown fields */
	if (bkey_val_u64s(a.k) < bch_alloc_val_u64s(a.v))
		return "incorrect value size";

	return NULL;
}

void bch2_alloc_to_text(struct printbuf *out, struct bch_fs *c,
			struct bkey_s_c k)
{
	struct bkey_s_c_alloc a = bkey_s_c_to_alloc(k);
	const void *d = a.v->data;
	unsigned i;

	pr_buf(out, "gen %u", a.v->gen);

	for (i = 0; i < BCH_ALLOC_FIELD_NR; i++)
		if (a.v->fields & (1 << i))
			pr_buf(out, " %s %llu",
			       bch2_alloc_field_names[i],
			       get_alloc_field(a.v, &d, i));
}

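/* Initialize an in-memory bucket from the fields of an on-disk alloc key: */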
static void __alloc_read_key(struct bucket *g, const struct bch_alloc *a)
{
	const void *d = a->data;
	unsigned idx = 0, data_type, dirty_sectors, cached_sectors;
	struct bucket_mark m;

	g->io_time[READ]	= get_alloc_field(a, &d, idx++);
	g->io_time[WRITE]	= get_alloc_field(a, &d, idx++);
	data_type		= get_alloc_field(a, &d, idx++);
	dirty_sectors		= get_alloc_field(a, &d, idx++);
	cached_sectors		= get_alloc_field(a, &d, idx++);
	g->oldest_gen		= get_alloc_field(a, &d, idx++);

	bucket_cmpxchg(g, m, ({
		m.gen			= a->gen;
		m.data_type		= data_type;
		m.dirty_sectors		= dirty_sectors;
		m.cached_sectors	= cached_sectors;
	}));

	g->gen_valid		= 1;
}

static void __alloc_write_key(struct bkey_i_alloc *a, struct bucket *g,
			      struct bucket_mark m)
{
	unsigned idx = 0;
	void *d = a->v.data;

	a->v.fields	= 0;
	a->v.gen	= m.gen;

	d = a->v.data;
	put_alloc_field(a, &d, idx++, g->io_time[READ]);
	put_alloc_field(a, &d, idx++, g->io_time[WRITE]);
	put_alloc_field(a, &d, idx++, m.data_type);
	put_alloc_field(a, &d, idx++, m.dirty_sectors);
	put_alloc_field(a, &d, idx++, m.cached_sectors);
	put_alloc_field(a, &d, idx++, g->oldest_gen);

	set_bkey_val_bytes(&a->k, (void *) d - (void *) &a->v);
}

static void bch2_alloc_read_key(struct bch_fs *c, struct bkey_s_c k)
{
	struct bch_dev *ca;
	struct bkey_s_c_alloc a;

	if (k.k->type != KEY_TYPE_alloc)
		return;

	a = bkey_s_c_to_alloc(k);
	ca = bch_dev_bkey_exists(c, a.k->p.inode);

	if (a.k->p.offset >= ca->mi.nbuckets)
		return;

	percpu_down_read(&c->mark_lock);
	__alloc_read_key(bucket(ca, a.k->p.offset), a.v);
	percpu_up_read(&c->mark_lock);
}

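/*
 * Called at startup: read bucket/alloc info from the alloc btree, then apply
 * any alloc keys found in the journal replay list on top of it.
 */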
int bch2_alloc_read(struct bch_fs *c, struct list_head *journal_replay_list)
{
	struct journal_replay *r;
	struct btree_iter iter;
	struct bkey_s_c k;
	struct bch_dev *ca;
	unsigned i;
	int ret;

	for_each_btree_key(&iter, c, BTREE_ID_ALLOC, POS_MIN, 0, k) {
		bch2_alloc_read_key(c, k);
		bch2_btree_iter_cond_resched(&iter);
	}

	ret = bch2_btree_iter_unlock(&iter);
	if (ret)
		return ret;

	list_for_each_entry(r, journal_replay_list, list) {
		struct bkey_i *k, *n;
		struct jset_entry *entry;

		for_each_jset_key(k, n, entry, &r->j)
			if (entry->btree_id == BTREE_ID_ALLOC)
				bch2_alloc_read_key(c, bkey_i_to_s_c(k));
	}

	for_each_member_device(ca, c, i)
		bch2_dev_usage_from_buckets(c, ca);

	mutex_lock(&c->bucket_clock[READ].lock);
	for_each_member_device(ca, c, i) {
		down_read(&ca->bucket_lock);
		bch2_recalc_oldest_io(c, ca, READ);
		up_read(&ca->bucket_lock);
	}
	mutex_unlock(&c->bucket_clock[READ].lock);

	mutex_lock(&c->bucket_clock[WRITE].lock);
	for_each_member_device(ca, c, i) {
		down_read(&ca->bucket_lock);
		bch2_recalc_oldest_io(c, ca, WRITE);
		up_read(&ca->bucket_lock);
	}
	mutex_unlock(&c->bucket_clock[WRITE].lock);

	return 0;
}

int bch2_alloc_replay_key(struct bch_fs *c, struct bkey_i *k)
{
	struct btree_trans trans;
	struct btree_iter *iter;
	struct bch_dev *ca;
	int ret;

	if (k->k.p.inode >= c->sb.nr_devices ||
	    !c->devs[k->k.p.inode])
		return 0;

	ca = bch_dev_bkey_exists(c, k->k.p.inode);

	if (k->k.p.offset >= ca->mi.nbuckets)
		return 0;

	bch2_trans_init(&trans, c);

	iter = bch2_trans_get_iter(&trans, BTREE_ID_ALLOC, k->k.p,
				   BTREE_ITER_INTENT);

	ret = bch2_btree_iter_traverse(iter);
	if (ret)
		goto err;

	/* check buckets_written with btree node locked: */
	if (test_bit(k->k.p.offset, ca->buckets_written)) {
		ret = 0;
		goto err;
	}

	bch2_trans_update(&trans, BTREE_INSERT_ENTRY(iter, k));

	ret = bch2_trans_commit(&trans, NULL, NULL,
				BTREE_INSERT_NOFAIL|
				BTREE_INSERT_JOURNAL_REPLAY|
				BTREE_INSERT_NOMARK);
err:
	bch2_trans_exit(&trans);
	return ret;
}

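/*
 * Write the in-memory state of bucket @b back to the alloc btree; if the
 * bucket isn't marked dirty this is a no-op, otherwise the dirty bit is
 * cleared once the update commits.
 */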
static int __bch2_alloc_write_key(struct btree_trans *trans, struct bch_dev *ca,
				  size_t b, struct btree_iter *iter,
				  u64 *journal_seq, unsigned flags)
{
	struct bch_fs *c = trans->c;
#if 0
	__BKEY_PADDED(k, BKEY_ALLOC_VAL_U64s_MAX) alloc_key;
#else
	/* hack: */
	__BKEY_PADDED(k, 8) alloc_key;
#endif
	struct bkey_i_alloc *a = bkey_alloc_init(&alloc_key.k);
	struct bucket *g;
	struct bucket_mark m, new;
	int ret;

	BUG_ON(BKEY_ALLOC_VAL_U64s_MAX > 8);

	a->k.p = POS(ca->dev_idx, b);

	bch2_btree_iter_set_pos(iter, a->k.p);

	ret = bch2_btree_iter_traverse(iter);
	if (ret)
		return ret;

	percpu_down_read(&c->mark_lock);
	g = bucket(ca, b);
	m = READ_ONCE(g->mark);

	if (!m.dirty) {
		percpu_up_read(&c->mark_lock);
		return 0;
	}

	__alloc_write_key(a, g, m);
	percpu_up_read(&c->mark_lock);

	bch2_btree_iter_cond_resched(iter);

	bch2_trans_update(trans, BTREE_INSERT_ENTRY(iter, &a->k_i));

	ret = bch2_trans_commit(trans, NULL, journal_seq,
				BTREE_INSERT_NOCHECK_RW|
				BTREE_INSERT_NOFAIL|
				BTREE_INSERT_USE_RESERVE|
				BTREE_INSERT_USE_ALLOC_RESERVE|
				BTREE_INSERT_NOMARK|
				flags);
	if (ret)
		return ret;

	new = m;
	new.dirty = false;
	atomic64_cmpxchg(&g->_mark.v, m.v.counter, new.v.counter);

	if (ca->buckets_written)
		set_bit(b, ca->buckets_written);

	return 0;
}

int bch2_alloc_write(struct bch_fs *c, bool nowait, bool *wrote)
{
	struct bch_dev *ca;
	unsigned i;
	int ret = 0;

	*wrote = false;

	for_each_rw_member(ca, c, i) {
		struct btree_trans trans;
		struct btree_iter *iter;
		struct bucket_array *buckets;
		size_t b;

		bch2_trans_init(&trans, c);

		iter = bch2_trans_get_iter(&trans, BTREE_ID_ALLOC, POS_MIN,
					   BTREE_ITER_SLOTS|BTREE_ITER_INTENT);

		down_read(&ca->bucket_lock);
		buckets = bucket_array(ca);

		for (b = buckets->first_bucket;
		     b < buckets->nbuckets;
		     b++) {
			if (!buckets->b[b].mark.dirty)
				continue;

			ret = __bch2_alloc_write_key(&trans, ca, b, iter, NULL,
						     nowait
						     ? BTREE_INSERT_NOWAIT
						     : 0);
			if (ret)
				break;

			*wrote = true;
		}
		up_read(&ca->bucket_lock);

		bch2_trans_exit(&trans);

		if (ret) {
			percpu_ref_put(&ca->io_ref);
			break;
		}
	}

	return ret;
}

/* Bucket IO clocks: */

static void bch2_recalc_oldest_io(struct bch_fs *c, struct bch_dev *ca, int rw)
{
	struct bucket_clock *clock = &c->bucket_clock[rw];
	struct bucket_array *buckets = bucket_array(ca);
	struct bucket *g;
	u16 max_last_io = 0;
	unsigned i;

	lockdep_assert_held(&c->bucket_clock[rw].lock);

	/* Recalculate max_last_io for this device: */
	for_each_bucket(g, buckets)
		max_last_io = max(max_last_io, bucket_last_io(c, g, rw));

	ca->max_last_bucket_io[rw] = max_last_io;

	/* Recalculate global max_last_io: */
	max_last_io = 0;

	for_each_member_device(ca, c, i)
		max_last_io = max(max_last_io, ca->max_last_bucket_io[rw]);

	clock->max_last_io = max_last_io;
}

static void bch2_rescale_bucket_io_times(struct bch_fs *c, int rw)
{
	struct bucket_clock *clock = &c->bucket_clock[rw];
	struct bucket_array *buckets;
	struct bch_dev *ca;
	struct bucket *g;
	unsigned i;

	trace_rescale_prios(c);

	for_each_member_device(ca, c, i) {
		down_read(&ca->bucket_lock);
		buckets = bucket_array(ca);

		for_each_bucket(g, buckets)
			g->io_time[rw] = clock->hand -
				bucket_last_io(c, g, rw) / 2;

		bch2_recalc_oldest_io(c, ca, rw);

		up_read(&ca->bucket_lock);
	}
}

static inline u64 bucket_clock_freq(u64 capacity)
{
	return max(capacity >> 10, 2028ULL);
}

static void bch2_inc_clock_hand(struct io_timer *timer)
{
	struct bucket_clock *clock = container_of(timer,
					struct bucket_clock, rescale);
	struct bch_fs *c = container_of(clock,
					struct bch_fs, bucket_clock[clock->rw]);
	struct bch_dev *ca;
	u64 capacity;
	unsigned i;

	mutex_lock(&clock->lock);

	/* if clock cannot be advanced more, rescale prio */
	if (clock->max_last_io >= U16_MAX - 2)
		bch2_rescale_bucket_io_times(c, clock->rw);

	BUG_ON(clock->max_last_io >= U16_MAX - 2);

	for_each_member_device(ca, c, i)
		ca->max_last_bucket_io[clock->rw]++;
	clock->max_last_io++;
	clock->hand++;

	mutex_unlock(&clock->lock);

	capacity = READ_ONCE(c->capacity);

	if (!capacity)
		return;

	/*
	 * we only increment when 0.1% of the filesystem capacity has been read
	 * or written to, this determines if it's time
	 *
	 * XXX: we shouldn't really be going off of the capacity of devices in
	 * RW mode (that will be 0 when we're RO, yet we can still service
	 * reads)
	 */
	timer->expire += bucket_clock_freq(capacity);

	bch2_io_timer_add(&c->io_clock[clock->rw], timer);
}

static void bch2_bucket_clock_init(struct bch_fs *c, int rw)
{
	struct bucket_clock *clock = &c->bucket_clock[rw];

	clock->hand		= 1;
	clock->rw		= rw;
	clock->rescale.fn	= bch2_inc_clock_hand;
	clock->rescale.expire	= bucket_clock_freq(c->capacity);
	mutex_init(&clock->lock);
}

/* Background allocator thread: */

/*
 * Scans for buckets to be invalidated, invalidates them, rewrites prios/gens
 * (marking them as invalidated on disk), then optionally issues discard
 * commands to the newly free buckets, then puts them on the various freelists.
 */

#define BUCKET_GC_GEN_MAX	96U

/**
 * wait_buckets_available - wait on reclaimable buckets
 *
 * If there aren't enough available buckets to fill up free_inc, wait until
 * there are.
 */
static int wait_buckets_available(struct bch_fs *c, struct bch_dev *ca)
{
	unsigned long gc_count = c->gc_count;
	int ret = 0;

	while (1) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (kthread_should_stop()) {
			ret = 1;
			break;
		}

		if (gc_count != c->gc_count)
			ca->inc_gen_really_needs_gc = 0;

		if ((ssize_t) (dev_buckets_available(c, ca) -
			       ca->inc_gen_really_needs_gc) >=
		    (ssize_t) fifo_free(&ca->free_inc))
			break;

		up_read(&c->gc_lock);
		schedule();
		try_to_freeze();
		down_read(&c->gc_lock);
	}

	__set_current_state(TASK_RUNNING);
	return ret;
}

static bool bch2_can_invalidate_bucket(struct bch_dev *ca,
				       size_t bucket,
				       struct bucket_mark mark)
{
	u8 gc_gen;

	if (!is_available_bucket(mark))
		return false;

	if (ca->buckets_nouse &&
	    test_bit(bucket, ca->buckets_nouse))
		return false;

	gc_gen = bucket_gc_gen(ca, bucket);

	if (gc_gen >= BUCKET_GC_GEN_MAX / 2)
		ca->inc_gen_needs_gc++;

	if (gc_gen >= BUCKET_GC_GEN_MAX)
		ca->inc_gen_really_needs_gc++;

	return gc_gen < BUCKET_GC_GEN_MAX;
}

/*
 * Determines what order we're going to reuse buckets, smallest bucket_key()
 * first.
 *
 *
 * - We take into account the read prio of the bucket, which gives us an
 *   indication of how hot the data is -- we scale the prio so that the prio
 *   farthest from the clock is worth 1/8th of the closest.
 *
 * - The number of sectors of cached data in the bucket, which gives us an
 *   indication of the cost in cache misses this eviction will cause.
 *
 * - If hotness * sectors used compares equal, we pick the bucket with the
 *   smallest bucket_gc_gen() - since incrementing the same bucket's generation
 *   number repeatedly forces us to run mark and sweep gc to avoid generation
 *   number wraparound.
 */

static unsigned long bucket_sort_key(struct bch_fs *c, struct bch_dev *ca,
				     size_t b, struct bucket_mark m)
{
	unsigned last_io = bucket_last_io(c, bucket(ca, b), READ);
	unsigned max_last_io = ca->max_last_bucket_io[READ];

	/*
	 * Time since last read, scaled to [0, 8) where larger value indicates
	 * more recently read data:
	 */
	unsigned long hotness = (max_last_io - last_io) * 7 / max_last_io;

	/* How much we want to keep the data in this bucket: */
	unsigned long data_wantness =
		(hotness + 1) * bucket_sectors_used(m);

	unsigned long needs_journal_commit =
		bucket_needs_journal_commit(m, c->journal.last_seq_ondisk);

	return  (data_wantness << 9) |
		(needs_journal_commit << 8) |
		(bucket_gc_gen(ca, b) / 16);
}

static inline int bucket_alloc_cmp(alloc_heap *h,
				   struct alloc_heap_entry l,
				   struct alloc_heap_entry r)
{
	return (l.key > r.key) - (l.key < r.key) ?:
		(l.nr < r.nr) - (l.nr > r.nr) ?:
		(l.bucket > r.bucket) - (l.bucket < r.bucket);
}

static inline int bucket_idx_cmp(const void *_l, const void *_r)
{
	const struct alloc_heap_entry *l = _l, *r = _r;

	return (l->bucket > r->bucket) - (l->bucket < r->bucket);
}

static void find_reclaimable_buckets_lru(struct bch_fs *c, struct bch_dev *ca)
{
	struct bucket_array *buckets;
	struct alloc_heap_entry e = { 0 };
	size_t b, i, nr = 0;

	ca->alloc_heap.used = 0;

	mutex_lock(&c->bucket_clock[READ].lock);
	down_read(&ca->bucket_lock);

	buckets = bucket_array(ca);

	bch2_recalc_oldest_io(c, ca, READ);

	/*
	 * Find buckets with lowest read priority, by building a maxheap sorted
	 * by read priority and repeatedly replacing the maximum element until
	 * all buckets have been visited.
	 */
	for (b = ca->mi.first_bucket; b < ca->mi.nbuckets; b++) {
		struct bucket_mark m = READ_ONCE(buckets->b[b].mark);
		unsigned long key = bucket_sort_key(c, ca, b, m);

		if (!bch2_can_invalidate_bucket(ca, b, m))
			continue;

		if (e.nr && e.bucket + e.nr == b && e.key == key) {
			e.nr++;
		} else {
			if (e.nr)
				heap_add_or_replace(&ca->alloc_heap, e,
						    -bucket_alloc_cmp, NULL);

			e = (struct alloc_heap_entry) {
				.bucket	= b,
				.nr	= 1,
				.key	= key,
			};
		}

		cond_resched();
	}

	if (e.nr)
		heap_add_or_replace(&ca->alloc_heap, e,
				    -bucket_alloc_cmp, NULL);

	for (i = 0; i < ca->alloc_heap.used; i++)
		nr += ca->alloc_heap.data[i].nr;

	while (nr - ca->alloc_heap.data[0].nr >= ALLOC_SCAN_BATCH(ca)) {
		nr -= ca->alloc_heap.data[0].nr;
		heap_pop(&ca->alloc_heap, e, -bucket_alloc_cmp, NULL);
	}

	up_read(&ca->bucket_lock);
	mutex_unlock(&c->bucket_clock[READ].lock);
}

static void find_reclaimable_buckets_fifo(struct bch_fs *c, struct bch_dev *ca)
{
	struct bucket_array *buckets = bucket_array(ca);
	struct bucket_mark m;
	size_t b, start;

	if (ca->fifo_last_bucket <  ca->mi.first_bucket ||
	    ca->fifo_last_bucket >= ca->mi.nbuckets)
		ca->fifo_last_bucket = ca->mi.first_bucket;

	start = ca->fifo_last_bucket;

	do {
		ca->fifo_last_bucket++;
		if (ca->fifo_last_bucket == ca->mi.nbuckets)
			ca->fifo_last_bucket = ca->mi.first_bucket;

		b = ca->fifo_last_bucket;
		m = READ_ONCE(buckets->b[b].mark);

		if (bch2_can_invalidate_bucket(ca, b, m)) {
			struct alloc_heap_entry e = { .bucket = b, .nr = 1, };

			heap_add(&ca->alloc_heap, e, bucket_alloc_cmp, NULL);
			if (heap_full(&ca->alloc_heap))
				break;
		}

		cond_resched();
	} while (ca->fifo_last_bucket != start);
}

static void find_reclaimable_buckets_random(struct bch_fs *c, struct bch_dev *ca)
{
	struct bucket_array *buckets = bucket_array(ca);
	struct bucket_mark m;
	size_t checked, i;

	for (checked = 0;
	     checked < ca->mi.nbuckets / 2;
	     checked++) {
		size_t b = bch2_rand_range(ca->mi.nbuckets -
					   ca->mi.first_bucket) +
			ca->mi.first_bucket;

		m = READ_ONCE(buckets->b[b].mark);

		if (bch2_can_invalidate_bucket(ca, b, m)) {
			struct alloc_heap_entry e = { .bucket = b, .nr = 1, };

			heap_add(&ca->alloc_heap, e, bucket_alloc_cmp, NULL);
			if (heap_full(&ca->alloc_heap))
				break;
		}

		cond_resched();
	}

	sort(ca->alloc_heap.data,
	     ca->alloc_heap.used,
	     sizeof(ca->alloc_heap.data[0]),
	     bucket_idx_cmp, NULL);

	/* remove duplicates: */
	for (i = 0; i + 1 < ca->alloc_heap.used; i++)
		if (ca->alloc_heap.data[i].bucket ==
		    ca->alloc_heap.data[i + 1].bucket)
			ca->alloc_heap.data[i].nr = 0;
}

static size_t find_reclaimable_buckets(struct bch_fs *c, struct bch_dev *ca)
{
	size_t i, nr = 0;

	ca->inc_gen_needs_gc = 0;

	switch (ca->mi.replacement) {
	case CACHE_REPLACEMENT_LRU:
		find_reclaimable_buckets_lru(c, ca);
		break;
	case CACHE_REPLACEMENT_FIFO:
		find_reclaimable_buckets_fifo(c, ca);
		break;
	case CACHE_REPLACEMENT_RANDOM:
		find_reclaimable_buckets_random(c, ca);
		break;
	}

	heap_resort(&ca->alloc_heap, bucket_alloc_cmp, NULL);

	for (i = 0; i < ca->alloc_heap.used; i++)
		nr += ca->alloc_heap.data[i].nr;

	return nr;
}

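/* Return the next bucket to invalidate from alloc_heap, or -1 if it's empty: */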
static inline long next_alloc_bucket(struct bch_dev *ca)
{
	struct alloc_heap_entry e, *top = ca->alloc_heap.data;

	while (ca->alloc_heap.used) {
		if (top->nr) {
			size_t b = top->bucket;

			top->bucket++;
			top->nr--;
			return b;
		}

		heap_pop(&ca->alloc_heap, e, bucket_alloc_cmp, NULL);
	}

	return -1;
}

/*
 * returns sequence number of most recent journal entry that updated this
 * bucket:
 */
static u64 bucket_journal_seq(struct bch_fs *c, struct bucket_mark m)
{
	if (m.journal_seq_valid) {
		u64 journal_seq = atomic64_read(&c->journal.seq);
		u64 bucket_seq	= journal_seq;

		bucket_seq &= ~((u64) U16_MAX);
		bucket_seq |= m.journal_seq;

		if (bucket_seq > journal_seq)
			bucket_seq -= 1 << 16;

		return bucket_seq;
	} else {
		return 0;
	}
}

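/*
 * Invalidate the bucket at the top of alloc_heap: push it onto free_inc and
 * mark it as owned by the allocator, then persist the new generation number
 * with a btree update. If the update fails, the bucket is taken back off
 * free_inc.
 */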
static int bch2_invalidate_one_bucket2(struct btree_trans *trans,
				       struct bch_dev *ca,
				       struct btree_iter *iter,
				       u64 *journal_seq, unsigned flags)
{
#if 0
	__BKEY_PADDED(k, BKEY_ALLOC_VAL_U64s_MAX) alloc_key;
#else
	/* hack: */
	__BKEY_PADDED(k, 8) alloc_key;
#endif
	struct bch_fs *c = trans->c;
	struct bkey_i_alloc *a;
	struct bkey_alloc_unpacked u;
	struct bucket_mark m;
	struct bkey_s_c k;
	bool invalidating_cached_data;
	size_t b;
	int ret;

	BUG_ON(!ca->alloc_heap.used ||
	       !ca->alloc_heap.data[0].nr);
	b = ca->alloc_heap.data[0].bucket;

	/* first, put on free_inc and mark as owned by allocator: */
	percpu_down_read(&c->mark_lock);
	spin_lock(&c->freelist_lock);

	verify_not_on_freelist(c, ca, b);

	BUG_ON(!fifo_push(&ca->free_inc, b));

	bch2_mark_alloc_bucket(c, ca, b, true, gc_pos_alloc(c, NULL), 0);
	m = bucket(ca, b)->mark;

	spin_unlock(&c->freelist_lock);
	percpu_up_read(&c->mark_lock);

	bch2_btree_iter_cond_resched(iter);

	BUG_ON(BKEY_ALLOC_VAL_U64s_MAX > 8);

	bch2_btree_iter_set_pos(iter, POS(ca->dev_idx, b));
retry:
	k = bch2_btree_iter_peek_slot(iter);
	ret = btree_iter_err(k);
	if (ret)
		return ret;

	if (k.k && k.k->type == KEY_TYPE_alloc)
		u = bch2_alloc_unpack(bkey_s_c_to_alloc(k).v);
	else
		memset(&u, 0, sizeof(u));

	invalidating_cached_data = m.cached_sectors != 0;

	//BUG_ON(u.dirty_sectors);
	u.data_type	= 0;
	u.dirty_sectors	= 0;
	u.cached_sectors = 0;
	u.read_time	= c->bucket_clock[READ].hand;
	u.write_time	= c->bucket_clock[WRITE].hand;

	/*
	 * The allocator has to start before journal replay is finished - thus,
	 * we have to trust the in memory bucket @m, not the version in the
	 * btree:
	 */
	u.gen		= m.gen + 1;

	a = bkey_alloc_init(&alloc_key.k);
	a->k.p = iter->pos;
	bch2_alloc_pack(a, u);

	bch2_trans_update(trans, BTREE_INSERT_ENTRY(iter, &a->k_i));

	/*
	 * XXX:
	 * when using deferred btree updates, we have journal reclaim doing
	 * btree updates and thus requiring the allocator to make forward
	 * progress, and here the allocator is requiring space in the journal -
	 * so we need a journal pre-reservation:
	 */
	ret = bch2_trans_commit(trans, NULL,
				invalidating_cached_data ? journal_seq : NULL,
				BTREE_INSERT_ATOMIC|
				BTREE_INSERT_NOUNLOCK|
				BTREE_INSERT_NOCHECK_RW|
				BTREE_INSERT_NOFAIL|
				BTREE_INSERT_USE_RESERVE|
				BTREE_INSERT_USE_ALLOC_RESERVE|
				flags);
	if (ret == -EINTR)
		goto retry;

	if (!ret) {
		/* remove from alloc_heap: */
		struct alloc_heap_entry e, *top = ca->alloc_heap.data;

		top->bucket++;
		top->nr--;

		if (!top->nr)
			heap_pop(&ca->alloc_heap, e, bucket_alloc_cmp, NULL);

		/* with btree still locked: */
		if (ca->buckets_written)
			set_bit(b, ca->buckets_written);

		/*
		 * Make sure we flush the last journal entry that updated this
		 * bucket (i.e. deleting the last reference) before writing to
		 * this bucket again:
		 */
		*journal_seq = max(*journal_seq, bucket_journal_seq(c, m));
	} else {
		size_t b2;

		/* remove from free_inc: */
		percpu_down_read(&c->mark_lock);
		spin_lock(&c->freelist_lock);

		bch2_mark_alloc_bucket(c, ca, b, false,
				       gc_pos_alloc(c, NULL), 0);

		BUG_ON(!fifo_pop_back(&ca->free_inc, b2));
		BUG_ON(b != b2);

		spin_unlock(&c->freelist_lock);
		percpu_up_read(&c->mark_lock);
	}

	return ret;
}

static bool bch2_invalidate_one_bucket(struct bch_fs *c, struct bch_dev *ca,
				       size_t bucket, u64 *flush_seq)
{
	struct bucket_mark m;

	percpu_down_read(&c->mark_lock);
	spin_lock(&c->freelist_lock);

	bch2_invalidate_bucket(c, ca, bucket, &m);

	verify_not_on_freelist(c, ca, bucket);
	BUG_ON(!fifo_push(&ca->free_inc, bucket));

	spin_unlock(&c->freelist_lock);

	bucket_io_clock_reset(c, ca, bucket, READ);
	bucket_io_clock_reset(c, ca, bucket, WRITE);

	percpu_up_read(&c->mark_lock);

	*flush_seq = max(*flush_seq, bucket_journal_seq(c, m));

	return m.cached_sectors != 0;
}

/*
 * Pull buckets off ca->alloc_heap, invalidate them, move them to ca->free_inc:
 */
static int bch2_invalidate_buckets(struct bch_fs *c, struct bch_dev *ca)
{
	struct btree_trans trans;
	struct btree_iter *iter;
	u64 journal_seq = 0;
	int ret = 0;

	bch2_trans_init(&trans, c);

	iter = bch2_trans_get_iter(&trans, BTREE_ID_ALLOC,
				   POS(ca->dev_idx, 0),
				   BTREE_ITER_SLOTS|BTREE_ITER_INTENT);

	/* Only use nowait if we've already invalidated at least one bucket: */
	while (!ret &&
	       !fifo_full(&ca->free_inc) &&
	       ca->alloc_heap.used)
		ret = bch2_invalidate_one_bucket2(&trans, ca, iter, &journal_seq,
				BTREE_INSERT_GC_LOCK_HELD|
				(!fifo_empty(&ca->free_inc)
				 ? BTREE_INSERT_NOWAIT : 0));

	bch2_trans_exit(&trans);

	/* If we used NOWAIT, don't return the error: */
	if (!fifo_empty(&ca->free_inc))
		ret = 0;
	if (ret) {
		bch_err(ca, "error invalidating buckets: %i", ret);
		return ret;
	}

	if (journal_seq)
		ret = bch2_journal_flush_seq(&c->journal, journal_seq);
	if (ret) {
		bch_err(ca, "journal error: %i", ret);
		return ret;
	}

	return 0;
}

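/*
 * Move an invalidated bucket from free_inc to the first freelist reserve with
 * room, sleeping until space is available; returns nonzero if the thread
 * should stop.
 */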
static int push_invalidated_bucket(struct bch_fs *c, struct bch_dev *ca, size_t bucket)
{
	unsigned i;
	int ret = 0;

	while (1) {
		set_current_state(TASK_INTERRUPTIBLE);

		spin_lock(&c->freelist_lock);
		for (i = 0; i < RESERVE_NR; i++)
			if (fifo_push(&ca->free[i], bucket)) {
				fifo_pop(&ca->free_inc, bucket);

				closure_wake_up(&c->freelist_wait);
				ca->allocator_blocked_full = false;

				spin_unlock(&c->freelist_lock);
				goto out;
			}

		if (!ca->allocator_blocked_full) {
			ca->allocator_blocked_full = true;
			closure_wake_up(&c->freelist_wait);
		}

		spin_unlock(&c->freelist_lock);

		if ((current->flags & PF_KTHREAD) &&
		    kthread_should_stop()) {
			ret = 1;
			break;
		}

		schedule();
		try_to_freeze();
	}
out:
	__set_current_state(TASK_RUNNING);
	return ret;
}

/*
 * Pulls buckets off free_inc, discards them (if enabled), then adds them to
 * freelists, waiting until there's room if necessary:
 */
static int discard_invalidated_buckets(struct bch_fs *c, struct bch_dev *ca)
{
	while (!fifo_empty(&ca->free_inc)) {
		size_t bucket = fifo_peek(&ca->free_inc);

		if (ca->mi.discard &&
		    bdev_max_discard_sectors(ca->disk_sb.bdev))
			blkdev_issue_discard(ca->disk_sb.bdev,
					     bucket_to_sector(ca, bucket),
					     ca->mi.bucket_size, GFP_NOIO);

		if (push_invalidated_bucket(c, ca, bucket))
			return 1;
	}

	return 0;
}

/**
 * bch_allocator_thread - move buckets from free_inc to reserves
 *
 * The free_inc FIFO is populated by find_reclaimable_buckets(), and
 * the reserves are depleted by bucket allocation. When we run out
 * of free_inc, try to invalidate some buckets and write out
 * prios and gens.
 */
static int bch2_allocator_thread(void *arg)
{
	struct bch_dev *ca = arg;
	struct bch_fs *c = ca->fs;
	size_t nr;
	int ret;

	set_freezable();

	while (1) {
		cond_resched();

		pr_debug("discarding %zu invalidated buckets",
			 fifo_used(&ca->free_inc));

		ret = discard_invalidated_buckets(c, ca);
		if (ret)
			goto stop;

		down_read(&c->gc_lock);

		ret = bch2_invalidate_buckets(c, ca);
		if (ret) {
			up_read(&c->gc_lock);
			goto stop;
		}

		if (!fifo_empty(&ca->free_inc)) {
			up_read(&c->gc_lock);
			continue;
		}

		pr_debug("free_inc now empty");

		do {
			/*
			 * Find some buckets that we can invalidate, either
			 * they're completely unused, or only contain clean data
			 * that's been written back to the backing device or
			 * another cache tier
			 */

			pr_debug("scanning for reclaimable buckets");

			nr = find_reclaimable_buckets(c, ca);

			pr_debug("found %zu buckets", nr);

			trace_alloc_batch(ca, nr, ca->alloc_heap.size);

			if ((ca->inc_gen_needs_gc >= ALLOC_SCAN_BATCH(ca) ||
			     ca->inc_gen_really_needs_gc) &&
			    c->gc_thread) {
				atomic_inc(&c->kick_gc);
				wake_up_process(c->gc_thread);
			}

			/*
			 * If we found any buckets, we have to invalidate them
			 * before we scan for more - but if we didn't find very
			 * many we may want to wait on more buckets being
			 * available so we don't spin:
			 */
			if (!nr ||
			    (nr < ALLOC_SCAN_BATCH(ca) &&
			     !fifo_full(&ca->free[RESERVE_MOVINGGC]))) {
				ca->allocator_blocked = true;
				closure_wake_up(&c->freelist_wait);

				ret = wait_buckets_available(c, ca);
				if (ret) {
					up_read(&c->gc_lock);
					goto stop;
				}
			}
		} while (!nr);

		ca->allocator_blocked = false;
		up_read(&c->gc_lock);

		pr_debug("%zu buckets to invalidate", nr);

		/*
		 * alloc_heap is now full of newly-invalidated buckets: next,
		 * write out the new bucket gens:
		 */
	}

stop:
	pr_debug("alloc thread stopping (ret %i)", ret);
	return 0;
}

/* Startup/shutdown (ro/rw): */

void bch2_recalc_capacity(struct bch_fs *c)
{
	struct bch_dev *ca;
	u64 capacity = 0, reserved_sectors = 0, gc_reserve;
	unsigned bucket_size_max = 0;
	unsigned long ra_pages = 0;
	unsigned i, j;

	lockdep_assert_held(&c->state_lock);

	for_each_online_member(ca, c, i) {
		struct backing_dev_info *bdi = ca->disk_sb.bdev->bd_disk->bdi;

		ra_pages += bdi->ra_pages;
	}

	bch2_set_ra_pages(c, ra_pages);

	for_each_rw_member(ca, c, i) {
		u64 dev_reserve = 0;

		/*
		 * We need to reserve buckets (from the number
		 * of currently available buckets) against
		 * foreground writes so that mainly copygc can
		 * make forward progress.
		 *
		 * We need enough to refill the various reserves
		 * from scratch - copygc will use its entire
		 * reserve all at once, then run against when
		 * its reserve is refilled (from the formerly
		 * available buckets).
		 *
		 * This reserve is just used when considering if
		 * allocations for foreground writes must wait -
		 * not -ENOSPC calculations.
		 */
		for (j = 0; j < RESERVE_NONE; j++)
			dev_reserve += ca->free[j].size;

		dev_reserve += 1;	/* btree write point */
		dev_reserve += 1;	/* copygc write point */
		dev_reserve += 1;	/* rebalance write point */

		dev_reserve *= ca->mi.bucket_size;

		ca->copygc_threshold = dev_reserve;

		capacity += bucket_to_sector(ca, ca->mi.nbuckets -
					     ca->mi.first_bucket);

		reserved_sectors += dev_reserve * 2;

		bucket_size_max = max_t(unsigned, bucket_size_max,
					ca->mi.bucket_size);
	}

	gc_reserve = c->opts.gc_reserve_bytes
		? c->opts.gc_reserve_bytes >> 9
		: div64_u64(capacity * c->opts.gc_reserve_percent, 100);

	reserved_sectors = max(gc_reserve, reserved_sectors);

	reserved_sectors = min(reserved_sectors, capacity);

	c->capacity = capacity - reserved_sectors;

	c->bucket_size_max = bucket_size_max;

	if (c->capacity) {
		bch2_io_timer_add(&c->io_clock[READ],
				  &c->bucket_clock[READ].rescale);
		bch2_io_timer_add(&c->io_clock[WRITE],
				  &c->bucket_clock[WRITE].rescale);
	} else {
		bch2_io_timer_del(&c->io_clock[READ],
				  &c->bucket_clock[READ].rescale);
		bch2_io_timer_del(&c->io_clock[WRITE],
				  &c->bucket_clock[WRITE].rescale);
	}

	/* Wake up in case someone was waiting for buckets */
	closure_wake_up(&c->freelist_wait);
}

static bool bch2_dev_has_open_write_point(struct bch_fs *c, struct bch_dev *ca)
{
	struct open_bucket *ob;
	bool ret = false;

	for (ob = c->open_buckets;
	     ob < c->open_buckets + ARRAY_SIZE(c->open_buckets);
	     ob++) {
		spin_lock(&ob->lock);
		if (ob->valid && !ob->on_partial_list &&
		    ob->ptr.dev == ca->dev_idx)
			ret = true;
		spin_unlock(&ob->lock);
	}

	return ret;
}

/* device goes ro: */
void bch2_dev_allocator_remove(struct bch_fs *c, struct bch_dev *ca)
{
	unsigned i;

	BUG_ON(ca->alloc_thread);

	/* First, remove device from allocation groups: */

	for (i = 0; i < ARRAY_SIZE(c->rw_devs); i++)
		clear_bit(ca->dev_idx, c->rw_devs[i].d);

	/*
	 * Capacity is calculated based off of devices in allocation groups:
	 */
	bch2_recalc_capacity(c);

	/* Next, close write points that point to this device... */
	for (i = 0; i < ARRAY_SIZE(c->write_points); i++)
		bch2_writepoint_stop(c, ca, &c->write_points[i]);

	bch2_writepoint_stop(c, ca, &ca->copygc_write_point);
	bch2_writepoint_stop(c, ca, &c->rebalance_write_point);
	bch2_writepoint_stop(c, ca, &c->btree_write_point);

	mutex_lock(&c->btree_reserve_cache_lock);
	while (c->btree_reserve_cache_nr) {
		struct btree_alloc *a =
			&c->btree_reserve_cache[--c->btree_reserve_cache_nr];

		bch2_open_buckets_put(c, &a->ob);
	}
	mutex_unlock(&c->btree_reserve_cache_lock);

	while (1) {
		struct open_bucket *ob;

		spin_lock(&c->freelist_lock);
		if (!ca->open_buckets_partial_nr) {
			spin_unlock(&c->freelist_lock);
			break;
		}
		ob = c->open_buckets +
			ca->open_buckets_partial[--ca->open_buckets_partial_nr];
		ob->on_partial_list = false;
		spin_unlock(&c->freelist_lock);

		bch2_open_bucket_put(c, ob);
	}

	bch2_ec_stop_dev(c, ca);

	/*
	 * Wake up threads that were blocked on allocation, so they can notice
	 * the device can no longer be removed and the capacity has changed:
	 */
	closure_wake_up(&c->freelist_wait);

	/*
	 * journal_res_get() can block waiting for free space in the journal -
	 * it needs to notice there may not be devices to allocate from anymore:
	 */
	wake_up(&c->journal.wait);

	/* Now wait for any in flight writes: */

	closure_wait_event(&c->open_buckets_wait,
			   !bch2_dev_has_open_write_point(c, ca));
}

/* device goes rw: */
void bch2_dev_allocator_add(struct bch_fs *c, struct bch_dev *ca)
{
	unsigned i;

	for (i = 0; i < ARRAY_SIZE(c->rw_devs); i++)
		if (ca->mi.data_allowed & (1 << i))
			set_bit(ca->dev_idx, c->rw_devs[i].d);
}

void bch2_dev_allocator_quiesce(struct bch_fs *c, struct bch_dev *ca)
{
	if (ca->alloc_thread)
		closure_wait_event(&c->freelist_wait, ca->allocator_blocked_full);
}

/* stop allocator thread: */
void bch2_dev_allocator_stop(struct bch_dev *ca)
{
	struct task_struct *p;

	p = rcu_dereference_protected(ca->alloc_thread, 1);
	ca->alloc_thread = NULL;

	/*
	 * We need an rcu barrier between setting ca->alloc_thread = NULL and
	 * the thread shutting down to avoid bch2_wake_allocator() racing:
	 *
	 * XXX: it would be better to have the rcu barrier be asynchronous
	 * instead of blocking us here
	 */
	synchronize_rcu();

	if (p) {
		kthread_stop(p);
		put_task_struct(p);
	}
}

/* start allocator thread: */
int bch2_dev_allocator_start(struct bch_dev *ca)
{
	struct task_struct *p;

	/*
	 * allocator thread already started?
	 */
	if (ca->alloc_thread)
		return 0;

	p = kthread_create(bch2_allocator_thread, ca,
			   "bch_alloc[%s]", ca->name);
	if (IS_ERR(p))
		return PTR_ERR(p);

	get_task_struct(p);
	rcu_assign_pointer(ca->alloc_thread, p);
	wake_up_process(p);
	return 0;
}

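/*
 * Returns true once every btree node needing a write has been written out and
 * no interior updates are still pending - used after dropping
 * BCH_FS_HOLD_BTREE_WRITES during allocator startup.
 */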
static bool flush_held_btree_writes(struct bch_fs *c)
{
	struct bucket_table *tbl;
	struct rhash_head *pos;
	struct btree *b;
	bool nodes_unwritten;
	size_t i;
again:
	cond_resched();
	nodes_unwritten = false;

	rcu_read_lock();
	for_each_cached_btree(b, c, tbl, i, pos)
		if (btree_node_need_write(b)) {
			if (btree_node_may_write(b)) {
				rcu_read_unlock();
				btree_node_lock_type(c, b, SIX_LOCK_read);
				bch2_btree_node_write(c, b, SIX_LOCK_read);
				six_unlock_read(&b->lock);
				goto again;
			} else {
				nodes_unwritten = true;
			}
		}
	rcu_read_unlock();

	if (c->btree_roots_dirty) {
		bch2_journal_meta(&c->journal);
		goto again;
	}

	return !nodes_unwritten &&
		!bch2_btree_interior_updates_nr_pending(c);
}

static void allocator_start_issue_discards(struct bch_fs *c)
{
	struct bch_dev *ca;
	unsigned dev_iter;
	size_t bu;

	for_each_rw_member(ca, c, dev_iter)
		while (fifo_pop(&ca->free_inc, bu))
			blkdev_issue_discard(ca->disk_sb.bdev,
					     bucket_to_sector(ca, bu),
					     ca->mi.bucket_size, GFP_NOIO);
}

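/* If free_inc is full, double its capacity (used during allocator startup): */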
static int resize_free_inc(struct bch_dev *ca)
{
	alloc_fifo free_inc;

	if (!fifo_full(&ca->free_inc))
		return 0;

	if (!init_fifo(&free_inc,
		       ca->free_inc.size * 2,
		       GFP_KERNEL))
		return -ENOMEM;

	fifo_move(&free_inc, &ca->free_inc);
	swap(free_inc, ca->free_inc);
	free_fifo(&free_inc);
	return 0;
}

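/*
 * Fast path for allocator startup: scan for buckets that are already empty
 * and usable, push them straight onto free_inc, and return true if that was
 * enough to fill the btree reserve on every rw device.
 */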
static bool bch2_fs_allocator_start_fast(struct bch_fs *c)
{
	struct bch_dev *ca;
	unsigned dev_iter;
	bool ret = true;

	if (test_alloc_startup(c))
		return false;

	down_read(&c->gc_lock);

	/* Scan for buckets that are already invalidated: */
	for_each_rw_member(ca, c, dev_iter) {
		struct bucket_array *buckets;
		struct bucket_mark m;
		long bu;

		down_read(&ca->bucket_lock);
		buckets = bucket_array(ca);

		for (bu = buckets->first_bucket;
		     bu < buckets->nbuckets; bu++) {
			m = READ_ONCE(buckets->b[bu].mark);

			if (!buckets->b[bu].gen_valid ||
			    !is_available_bucket(m) ||
			    m.cached_sectors ||
			    (ca->buckets_nouse &&
			     test_bit(bu, ca->buckets_nouse)))
				continue;

			percpu_down_read(&c->mark_lock);
			bch2_mark_alloc_bucket(c, ca, bu, true,
					gc_pos_alloc(c, NULL), 0);
			percpu_up_read(&c->mark_lock);

			fifo_push(&ca->free_inc, bu);

			discard_invalidated_buckets(c, ca);

			if (fifo_full(&ca->free[RESERVE_BTREE]))
				break;
		}
		up_read(&ca->bucket_lock);
	}

	up_read(&c->gc_lock);

	/* did we find enough buckets? */
	for_each_rw_member(ca, c, dev_iter)
		if (!fifo_full(&ca->free[RESERVE_BTREE]))
			ret = false;

	return ret;
}

static int __bch2_fs_allocator_start(struct bch_fs *c)
{
	struct bch_dev *ca;
	unsigned dev_iter;
	u64 journal_seq = 0;
	bool wrote;
	long bu;
	int ret = 0;

	pr_debug("not enough empty buckets; scanning for reclaimable buckets");

	/*
	 * We're moving buckets to freelists _before_ they've been marked as
	 * invalidated on disk - we have to so that we can allocate new btree
	 * nodes to mark them as invalidated on disk.
	 *
	 * However, we can't _write_ to any of these buckets yet - they might
	 * have cached data in them, which is live until they're marked as
	 * invalidated on disk:
	 */
	set_bit(BCH_FS_HOLD_BTREE_WRITES, &c->flags);

	down_read(&c->gc_lock);
	do {
		wrote = false;

		for_each_rw_member(ca, c, dev_iter) {
			find_reclaimable_buckets(c, ca);

			while (!fifo_full(&ca->free[RESERVE_BTREE]) &&
			       (bu = next_alloc_bucket(ca)) >= 0) {
				ret = resize_free_inc(ca);
				if (ret) {
					percpu_ref_put(&ca->io_ref);
					up_read(&c->gc_lock);
					goto err;
				}

				bch2_invalidate_one_bucket(c, ca, bu,
							   &journal_seq);

				fifo_push(&ca->free[RESERVE_BTREE], bu);
			}
		}

		pr_debug("done scanning for reclaimable buckets");

		/*
		 * XXX: it's possible for this to deadlock waiting on journal reclaim,
		 * since we're holding btree writes. What then?
		 */
		ret = bch2_alloc_write(c, true, &wrote);

		/*
		 * If bch2_alloc_write() did anything, it may have used some
		 * buckets, and we need the RESERVE_BTREE freelist full - so we
		 * need to loop and scan again.
		 * And if it errored, it may have been because there weren't
		 * enough buckets, so just scan and loop again as long as it
		 * made some progress:
		 */
	} while (wrote);
	up_read(&c->gc_lock);

	if (ret)
		goto err;

	pr_debug("flushing journal");

	ret = bch2_journal_flush(&c->journal);
	if (ret)
		goto err;

	pr_debug("issuing discards");
	allocator_start_issue_discards(c);
err:
	clear_bit(BCH_FS_HOLD_BTREE_WRITES, &c->flags);
	closure_wait_event(&c->btree_interior_update_wait,
			   flush_held_btree_writes(c));

	return ret;
}

int bch2_fs_allocator_start(struct bch_fs *c)
{
	struct bch_dev *ca;
	unsigned i;
	int ret;

	ret = bch2_fs_allocator_start_fast(c) ? 0 :
		__bch2_fs_allocator_start(c);
	if (ret)
		return ret;

	set_bit(BCH_FS_ALLOCATOR_STARTED, &c->flags);

	for_each_rw_member(ca, c, i) {
		ret = bch2_dev_allocator_start(ca);
		if (ret) {
			percpu_ref_put(&ca->io_ref);
			return ret;
		}
	}

	set_bit(BCH_FS_ALLOCATOR_RUNNING, &c->flags);
	return 0;
}

void bch2_fs_allocator_background_init(struct bch_fs *c)
{
	spin_lock_init(&c->freelist_lock);
	bch2_bucket_clock_init(c, READ);
	bch2_bucket_clock_init(c, WRITE);

	c->pd_controllers_update_seconds = 5;
	INIT_DELAYED_WORK(&c->pd_controllers_update, pd_controllers_update);
}