/*
 * Primary bucket allocation code
 *
 * Copyright 2012 Google, Inc.
 *
 * Allocation in bcache is done in terms of buckets:
 *
 * Each bucket has associated an 8 bit gen; this gen corresponds to the gen in
 * btree pointers - they must match for the pointer to be considered valid.
 *
 * Thus (assuming a bucket has no dirty data or metadata in it) we can reuse a
 * bucket simply by incrementing its gen.
 *
 * The gens (along with the priorities; it's really the gens that are
 * important, but the code is named as if it's the priorities) are written in
 * an arbitrary list of buckets on disk, with a pointer to them in the journal
 * header.
 *
 * When we invalidate a bucket, we have to write its new gen to disk and wait
 * for that write to complete before we use it - otherwise after a crash we
 * could have pointers that appeared to be good but pointed to data that had
 * been overwritten.
 *
 * Since the gens and priorities are all stored contiguously on disk, we can
 * batch this up: We fill up the free_inc list with freshly invalidated buckets,
 * call prio_write(), and when prio_write() finishes we pull buckets off the
 * free_inc list and optionally discard them.
 *
 * free_inc isn't the only freelist - if it was, we'd often have to sleep while
 * priorities and gens were being written before we could allocate. c->free is a
 * smaller freelist, and buckets on that list are always ready to be used.
 *
 * If we've got discards enabled, that happens when a bucket moves from the
 * free_inc list to the free list.
 *
 * It's important to ensure that gens don't wrap around - with respect to
 * either the oldest gen in the btree or the gen on disk. This is quite
 * difficult to do in practice, but we explicitly guard against it anyway - if
 * a bucket is in danger of wrapping around we simply skip invalidating it that
 * time around, and we garbage collect or rewrite the priorities sooner than we
 * would have otherwise.
 *
 * bch2_bucket_alloc() allocates a single bucket from a specific device.
 *
 * bch2_bucket_alloc_set() allocates one or more buckets from different devices
 * in a given filesystem.
 *
 * invalidate_buckets() drives all the processes described above. It's called
 * from bch2_bucket_alloc() and a few other places that need to make sure free
 * buckets are ready.
 *
 * invalidate_buckets_(lru|fifo)() find buckets that are available to be
 * invalidated, and then invalidate them and stick them on the free_inc list -
 * in either lru or fifo order.
 */
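
/*
 * Illustrative sketch (not part of the driver): the gen scheme above means
 * pointer validity reduces to a single byte comparison. The types here are
 * hypothetical stand-ins for the real bucket/extent structures:
 *
 *	struct demo_bucket	{ u8 gen; };
 *	struct demo_ptr		{ u8 gen; };
 *
 *	static inline bool demo_ptr_stale(const struct demo_bucket *b,
 *					  const struct demo_ptr *ptr)
 *	{
 *		return ptr->gen != b->gen;
 *	}
 *
 * Reusing an empty bucket is then just b->gen++, which - once the new gen is
 * written to disk - invalidates every btree pointer into that bucket at once.
 */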

#include "bcachefs.h"
#include "alloc.h"
#include "btree_cache.h"
#include "btree_io.h"
#include "btree_update.h"
#include "btree_update_interior.h"
#include "btree_gc.h"
#include "buckets.h"
#include "checksum.h"
#include "clock.h"
#include "debug.h"
#include "disk_groups.h"
#include "error.h"
#include "extents.h"
#include "io.h"
#include "journal.h"
#include "journal_io.h"
#include "super-io.h"
#include "trace.h"

#include <linux/blkdev.h>
#include <linux/kthread.h>
#include <linux/math64.h>
#include <linux/random.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>
#include <linux/sched/task.h>
#include <linux/sort.h>

static void bch2_recalc_oldest_io(struct bch_fs *, struct bch_dev *, int);

/* Ratelimiting/PD controllers */

static void pd_controllers_update(struct work_struct *work)
{
	struct bch_fs *c = container_of(to_delayed_work(work),
					struct bch_fs,
					pd_controllers_update);
	struct bch_dev *ca;
	unsigned i;

	for_each_member_device(ca, c, i) {
		struct bch_dev_usage stats = bch2_dev_usage_read(c, ca);

		u64 free = bucket_to_sector(ca,
				__dev_buckets_free(ca, stats)) << 9;
		/*
		 * Bytes of internal fragmentation, which can be
		 * reclaimed by copy GC
		 */
		s64 fragmented = (bucket_to_sector(ca,
					stats.buckets[BCH_DATA_USER] +
					stats.buckets[BCH_DATA_CACHED]) -
				  (stats.sectors[BCH_DATA_USER] +
				   stats.sectors[BCH_DATA_CACHED])) << 9;

		fragmented = max(0LL, fragmented);

		bch2_pd_controller_update(&ca->copygc_pd,
					  free, fragmented, -1);
	}

	schedule_delayed_work(&c->pd_controllers_update,
			      c->pd_controllers_update_seconds * HZ);
}

/* Persistent alloc info: */

static unsigned bch_alloc_val_u64s(const struct bch_alloc *a)
{
	unsigned bytes = offsetof(struct bch_alloc, data);

	if (a->fields & (1 << BCH_ALLOC_FIELD_READ_TIME))
		bytes += 2;
	if (a->fields & (1 << BCH_ALLOC_FIELD_WRITE_TIME))
		bytes += 2;

	return DIV_ROUND_UP(bytes, sizeof(u64));
}

const char *bch2_alloc_invalid(const struct bch_fs *c, struct bkey_s_c k)
{
	if (k.k->p.inode >= c->sb.nr_devices ||
	    !c->devs[k.k->p.inode])
		return "invalid device";

	switch (k.k->type) {
	case BCH_ALLOC: {
		struct bkey_s_c_alloc a = bkey_s_c_to_alloc(k);

		if (bch_alloc_val_u64s(a.v) != bkey_val_u64s(a.k))
			return "incorrect value size";
		break;
	}
	default:
		return "invalid type";
	}

	return NULL;
}

int bch2_alloc_to_text(struct bch_fs *c, char *buf,
		       size_t size, struct bkey_s_c k)
{
	buf[0] = '\0';

	switch (k.k->type) {
	case BCH_ALLOC:
		break;
	}

	return 0;
}

static inline unsigned get_alloc_field(const u8 **p, unsigned bytes)
{
	unsigned v;

	switch (bytes) {
	case 1:
		v = **p;
		break;
	case 2:
		v = le16_to_cpup((void *) *p);
		break;
	case 4:
		v = le32_to_cpup((void *) *p);
		break;
	default:
		BUG();
	}

	*p += bytes;
	return v;
}

static inline void put_alloc_field(u8 **p, unsigned bytes, unsigned v)
{
	switch (bytes) {
	case 1:
		**p = v;
		break;
	case 2:
		*((__le16 *) *p) = cpu_to_le16(v);
		break;
	case 4:
		*((__le32 *) *p) = cpu_to_le32(v);
		break;
	default:
		BUG();
	}

	*p += bytes;
}
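
/*
 * Illustrative sketch (hypothetical buffer, not part of the driver): the two
 * helpers above form a little-endian codec for the variable-length fields of
 * a struct bch_alloc value. Encode and decode must walk the fields in the
 * same order, e.g.:
 *
 *	u8 buf[4], *out = buf;
 *	const u8 *in = buf;
 *
 *	put_alloc_field(&out, 2, read_time);
 *	put_alloc_field(&out, 2, write_time);
 *
 *	BUG_ON(get_alloc_field(&in, 2) != read_time);
 *	BUG_ON(get_alloc_field(&in, 2) != write_time);
 *
 * Both helpers advance their cursor by @bytes, so encode and decode stay in
 * lockstep as long as the field bitmask is consulted in the same order.
 */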

static void bch2_alloc_read_key(struct bch_fs *c, struct bkey_s_c k)
{
	struct bch_dev *ca;
	struct bkey_s_c_alloc a;
	struct bucket_mark new;
	struct bucket *g;
	const u8 *d;

	if (k.k->type != BCH_ALLOC)
		return;

	a = bkey_s_c_to_alloc(k);
	ca = bch_dev_bkey_exists(c, a.k->p.inode);

	if (a.k->p.offset >= ca->mi.nbuckets)
		return;

	percpu_down_read(&c->usage_lock);

	g = bucket(ca, a.k->p.offset);
	bucket_cmpxchg(g, new, ({
		new.gen = a.v->gen;
		new.gen_valid = 1;
	}));

	d = a.v->data;
	if (a.v->fields & (1 << BCH_ALLOC_FIELD_READ_TIME))
		g->io_time[READ] = get_alloc_field(&d, 2);
	if (a.v->fields & (1 << BCH_ALLOC_FIELD_WRITE_TIME))
		g->io_time[WRITE] = get_alloc_field(&d, 2);

	percpu_up_read(&c->usage_lock);
}

int bch2_alloc_read(struct bch_fs *c, struct list_head *journal_replay_list)
{
	struct journal_replay *r;
	struct btree_iter iter;
	struct bkey_s_c k;
	struct bch_dev *ca;
	unsigned i;
	int ret;

	for_each_btree_key(&iter, c, BTREE_ID_ALLOC, POS_MIN, 0, k) {
		bch2_alloc_read_key(c, k);
		bch2_btree_iter_cond_resched(&iter);
	}

	ret = bch2_btree_iter_unlock(&iter);
	if (ret)
		return ret;

	list_for_each_entry(r, journal_replay_list, list) {
		struct bkey_i *k, *n;
		struct jset_entry *entry;

		for_each_jset_key(k, n, entry, &r->j)
			if (entry->btree_id == BTREE_ID_ALLOC)
				bch2_alloc_read_key(c, bkey_i_to_s_c(k));
	}

	mutex_lock(&c->bucket_clock[READ].lock);
	for_each_member_device(ca, c, i) {
		down_read(&ca->bucket_lock);
		bch2_recalc_oldest_io(c, ca, READ);
		up_read(&ca->bucket_lock);
	}
	mutex_unlock(&c->bucket_clock[READ].lock);

	mutex_lock(&c->bucket_clock[WRITE].lock);
	for_each_member_device(ca, c, i) {
		down_read(&ca->bucket_lock);
		bch2_recalc_oldest_io(c, ca, WRITE);
		up_read(&ca->bucket_lock);
	}
	mutex_unlock(&c->bucket_clock[WRITE].lock);

	return 0;
}

static int __bch2_alloc_write_key(struct bch_fs *c, struct bch_dev *ca,
				  size_t b, struct btree_iter *iter,
				  u64 *journal_seq, unsigned flags)
{
	struct bucket_mark m;
	__BKEY_PADDED(k, DIV_ROUND_UP(sizeof(struct bch_alloc), 8)) alloc_key;
	struct bucket *g;
	struct bkey_i_alloc *a;
	u8 *d;

	percpu_down_read(&c->usage_lock);
	g = bucket(ca, b);

	m = READ_ONCE(g->mark);
	a = bkey_alloc_init(&alloc_key.k);
	a->k.p = POS(ca->dev_idx, b);
	a->v.fields = 0;
	a->v.gen = m.gen;
	set_bkey_val_u64s(&a->k, bch_alloc_val_u64s(&a->v));

	d = a->v.data;
	if (a->v.fields & (1 << BCH_ALLOC_FIELD_READ_TIME))
		put_alloc_field(&d, 2, g->io_time[READ]);
	if (a->v.fields & (1 << BCH_ALLOC_FIELD_WRITE_TIME))
		put_alloc_field(&d, 2, g->io_time[WRITE]);
	percpu_up_read(&c->usage_lock);

	bch2_btree_iter_cond_resched(iter);

	bch2_btree_iter_set_pos(iter, a->k.p);

	return bch2_btree_insert_at(c, NULL, NULL, journal_seq,
				    BTREE_INSERT_NOFAIL|
				    BTREE_INSERT_USE_RESERVE|
				    BTREE_INSERT_USE_ALLOC_RESERVE|
				    flags,
				    BTREE_INSERT_ENTRY(iter, &a->k_i));
}

int bch2_alloc_replay_key(struct bch_fs *c, struct bpos pos)
{
	struct bch_dev *ca;
	struct btree_iter iter;
	int ret;

	if (pos.inode >= c->sb.nr_devices || !c->devs[pos.inode])
		return 0;

	ca = bch_dev_bkey_exists(c, pos.inode);

	if (pos.offset >= ca->mi.nbuckets)
		return 0;

	bch2_btree_iter_init(&iter, c, BTREE_ID_ALLOC, POS_MIN,
			     BTREE_ITER_SLOTS|BTREE_ITER_INTENT);

	ret = __bch2_alloc_write_key(c, ca, pos.offset, &iter, NULL, 0);
	bch2_btree_iter_unlock(&iter);
	return ret;
}

int bch2_alloc_write(struct bch_fs *c)
{
	struct bch_dev *ca;
	unsigned i;
	int ret = 0;

	for_each_rw_member(ca, c, i) {
		struct btree_iter iter;
		unsigned long bucket;

		bch2_btree_iter_init(&iter, c, BTREE_ID_ALLOC, POS_MIN,
				     BTREE_ITER_SLOTS|BTREE_ITER_INTENT);

		down_read(&ca->bucket_lock);
		for_each_set_bit(bucket, ca->buckets_dirty, ca->mi.nbuckets) {
			ret = __bch2_alloc_write_key(c, ca, bucket,
						     &iter, NULL, 0);
			if (ret)
				break;

			clear_bit(bucket, ca->buckets_dirty);
		}
		up_read(&ca->bucket_lock);
		bch2_btree_iter_unlock(&iter);

		if (ret) {
			percpu_ref_put(&ca->io_ref);
			break;
		}
	}

	return ret;
}

/* Bucket IO clocks: */

static void bch2_recalc_oldest_io(struct bch_fs *c, struct bch_dev *ca, int rw)
{
	struct bucket_clock *clock = &c->bucket_clock[rw];
	struct bucket_array *buckets = bucket_array(ca);
	struct bucket *g;
	u16 max_last_io = 0;
	unsigned i;

	lockdep_assert_held(&c->bucket_clock[rw].lock);

	/* Recalculate max_last_io for this device: */
	for_each_bucket(g, buckets)
		max_last_io = max(max_last_io, bucket_last_io(c, g, rw));

	ca->max_last_bucket_io[rw] = max_last_io;

	/* Recalculate global max_last_io: */
	max_last_io = 0;

	for_each_member_device(ca, c, i)
		max_last_io = max(max_last_io, ca->max_last_bucket_io[rw]);

	clock->max_last_io = max_last_io;
}

static void bch2_rescale_bucket_io_times(struct bch_fs *c, int rw)
{
	struct bucket_clock *clock = &c->bucket_clock[rw];
	struct bucket_array *buckets;
	struct bch_dev *ca;
	struct bucket *g;
	unsigned i;

	trace_rescale_prios(c);

	for_each_member_device(ca, c, i) {
		down_read(&ca->bucket_lock);
		buckets = bucket_array(ca);

		for_each_bucket(g, buckets)
			g->io_time[rw] = clock->hand -
				bucket_last_io(c, g, rw) / 2;

		bch2_recalc_oldest_io(c, ca, rw);

		up_read(&ca->bucket_lock);
	}
}

static void bch2_inc_clock_hand(struct io_timer *timer)
{
	struct bucket_clock *clock = container_of(timer,
					struct bucket_clock, rescale);
	struct bch_fs *c = container_of(clock,
					struct bch_fs, bucket_clock[clock->rw]);
	struct bch_dev *ca;
	u64 capacity;
	unsigned i;

	mutex_lock(&clock->lock);

	/* if clock cannot be advanced more, rescale prio */
	if (clock->max_last_io >= U16_MAX - 2)
		bch2_rescale_bucket_io_times(c, clock->rw);

	BUG_ON(clock->max_last_io >= U16_MAX - 2);

	for_each_member_device(ca, c, i)
		ca->max_last_bucket_io[clock->rw]++;
	clock->max_last_io++;
	clock->hand++;

	mutex_unlock(&clock->lock);

	capacity = READ_ONCE(c->capacity);

	if (!capacity)
		return;

	/*
	 * We only advance the clock hand after roughly 0.1% of the
	 * filesystem's capacity (capacity >> 10) has been read or written to;
	 * that's what determines when it's time.
	 *
	 * XXX: we shouldn't really be going off of the capacity of devices in
	 * RW mode (that will be 0 when we're RO, yet we can still service
	 * reads)
	 */
	timer->expire += capacity >> 10;

	bch2_io_timer_add(&c->io_clock[clock->rw], timer);
}

static void bch2_bucket_clock_init(struct bch_fs *c, int rw)
{
	struct bucket_clock *clock = &c->bucket_clock[rw];

	clock->hand = 1;
	clock->rw = rw;
	clock->rescale.fn = bch2_inc_clock_hand;
	clock->rescale.expire = c->capacity >> 10;
	mutex_init(&clock->lock);
}

/* Background allocator thread: */

/*
 * Scans for buckets to be invalidated, invalidates them, rewrites prios/gens
 * (marking them as invalidated on disk), then optionally issues discard
 * commands to the newly free buckets, then puts them on the various freelists.
 */
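
/*
 * In outline - a condensed sketch of bch2_allocator_thread() below, in terms
 * of the helpers defined in the rest of this file:
 *
 *	while (!kthread_should_stop()) {
 *		find_reclaimable_buckets(c, ca);	// fills ca->alloc_heap
 *		bch2_invalidate_buckets(c, ca);		// alloc_heap -> free_inc,
 *							// writes the new gens
 *		discard_invalidated_buckets(c, ca);	// free_inc -> ca->free[]
 *	}
 */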

static void verify_not_on_freelist(struct bch_fs *c, struct bch_dev *ca,
				   size_t bucket)
{
	if (expensive_debug_checks(c) &&
	    test_bit(BCH_FS_ALLOCATOR_STARTED, &c->flags)) {
		size_t iter;
		long i;
		unsigned j;

		for (j = 0; j < RESERVE_NR; j++)
			fifo_for_each_entry(i, &ca->free[j], iter)
				BUG_ON(i == bucket);
		fifo_for_each_entry(i, &ca->free_inc, iter)
			BUG_ON(i == bucket);
	}
}

#define BUCKET_GC_GEN_MAX	96U

/**
 * wait_buckets_available - wait on reclaimable buckets
 *
 * If there aren't enough available buckets to fill up free_inc, wait until
 * there are.
 */
static int wait_buckets_available(struct bch_fs *c, struct bch_dev *ca)
{
	unsigned long gc_count = c->gc_count;
	int ret = 0;

	while (1) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (kthread_should_stop()) {
			ret = 1;
			break;
		}

		if (gc_count != c->gc_count)
			ca->inc_gen_really_needs_gc = 0;

		if ((ssize_t) (dev_buckets_available(c, ca) -
			       ca->inc_gen_really_needs_gc) >=
		    (ssize_t) fifo_free(&ca->free_inc))
			break;

		up_read(&c->gc_lock);
		schedule();
		try_to_freeze();
		down_read(&c->gc_lock);
	}

	__set_current_state(TASK_RUNNING);
	return ret;
}

static bool bch2_can_invalidate_bucket(struct bch_dev *ca,
				       size_t bucket,
				       struct bucket_mark mark)
{
	u8 gc_gen;

	if (!is_available_bucket(mark))
		return false;

	gc_gen = bucket_gc_gen(ca, bucket);

	if (gc_gen >= BUCKET_GC_GEN_MAX / 2)
		ca->inc_gen_needs_gc++;

	if (gc_gen >= BUCKET_GC_GEN_MAX)
		ca->inc_gen_really_needs_gc++;

	return gc_gen < BUCKET_GC_GEN_MAX;
}

/*
 * Determines what order we're going to reuse buckets, smallest
 * bucket_sort_key() first:
 *
 * - We take into account the read prio of the bucket, which gives us an
 *   indication of how hot the data is -- we scale the prio so that the prio
 *   farthest from the clock is worth 1/8th of the closest.
 *
 * - The number of sectors of cached data in the bucket, which gives us an
 *   indication of the cost in cache misses this eviction will cause.
 *
 * - If hotness * sectors used compares equal, we pick the bucket with the
 *   smallest bucket_gc_gen() - since incrementing the same bucket's generation
 *   number repeatedly forces us to run mark and sweep gc to avoid generation
 *   number wraparound.
 */

static unsigned long bucket_sort_key(struct bch_fs *c, struct bch_dev *ca,
				     size_t b, struct bucket_mark m)
{
	unsigned last_io = bucket_last_io(c, bucket(ca, b), READ);
	unsigned max_last_io = ca->max_last_bucket_io[READ];

	/*
	 * Time since last read, scaled to [0, 8) where larger value indicates
	 * more recently read data:
	 */
	unsigned long hotness = (max_last_io - last_io) * 7 / max_last_io;

	/* How much we want to keep the data in this bucket: */
	unsigned long data_wantness =
		(hotness + 1) * bucket_sectors_used(m);

	unsigned long needs_journal_commit =
		bucket_needs_journal_commit(m, c->journal.last_seq_ondisk);

	return  (data_wantness << 9) |
		(needs_journal_commit << 8) |
		bucket_gc_gen(ca, b);
}

static inline int bucket_alloc_cmp(alloc_heap *h,
				   struct alloc_heap_entry l,
				   struct alloc_heap_entry r)
{
	return (l.key > r.key) - (l.key < r.key) ?:
		(l.nr < r.nr) - (l.nr > r.nr) ?:
		(l.bucket > r.bucket) - (l.bucket < r.bucket);
}

static inline int bucket_idx_cmp(const void *_l, const void *_r)
{
	const struct alloc_heap_entry *l = _l, *r = _r;

	return (l->bucket > r->bucket) - (l->bucket < r->bucket);
}

static void find_reclaimable_buckets_lru(struct bch_fs *c, struct bch_dev *ca)
{
	struct bucket_array *buckets;
	struct alloc_heap_entry e = { 0 };
	size_t b, i, nr = 0;

	ca->alloc_heap.used = 0;

	mutex_lock(&c->bucket_clock[READ].lock);
	down_read(&ca->bucket_lock);

	buckets = bucket_array(ca);

	bch2_recalc_oldest_io(c, ca, READ);

	/*
	 * Find buckets with lowest read priority, by building a maxheap sorted
	 * by read priority and repeatedly replacing the maximum element until
	 * all buckets have been visited.
	 */
	for (b = ca->mi.first_bucket; b < ca->mi.nbuckets; b++) {
		struct bucket_mark m = READ_ONCE(buckets->b[b].mark);
		unsigned long key = bucket_sort_key(c, ca, b, m);

		if (!bch2_can_invalidate_bucket(ca, b, m))
			continue;

		if (e.nr && e.bucket + e.nr == b && e.key == key) {
			e.nr++;
		} else {
			if (e.nr)
				heap_add_or_replace(&ca->alloc_heap, e, -bucket_alloc_cmp);

			e = (struct alloc_heap_entry) {
				.bucket	= b,
				.nr	= 1,
				.key	= key,
			};
		}

		cond_resched();
	}

	if (e.nr)
		heap_add_or_replace(&ca->alloc_heap, e, -bucket_alloc_cmp);

	for (i = 0; i < ca->alloc_heap.used; i++)
		nr += ca->alloc_heap.data[i].nr;

	while (nr - ca->alloc_heap.data[0].nr >= ALLOC_SCAN_BATCH(ca)) {
		nr -= ca->alloc_heap.data[0].nr;
		heap_pop(&ca->alloc_heap, e, -bucket_alloc_cmp);
	}

	up_read(&ca->bucket_lock);
	mutex_unlock(&c->bucket_clock[READ].lock);
}

static void find_reclaimable_buckets_fifo(struct bch_fs *c, struct bch_dev *ca)
{
	struct bucket_array *buckets = bucket_array(ca);
	struct bucket_mark m;
	size_t b, start;

	if (ca->fifo_last_bucket <  ca->mi.first_bucket ||
	    ca->fifo_last_bucket >= ca->mi.nbuckets)
		ca->fifo_last_bucket = ca->mi.first_bucket;

	start = ca->fifo_last_bucket;

	do {
		ca->fifo_last_bucket++;
		if (ca->fifo_last_bucket == ca->mi.nbuckets)
			ca->fifo_last_bucket = ca->mi.first_bucket;

		b = ca->fifo_last_bucket;
		m = READ_ONCE(buckets->b[b].mark);

		if (bch2_can_invalidate_bucket(ca, b, m)) {
			struct alloc_heap_entry e = { .bucket = b, .nr = 1, };

			heap_add(&ca->alloc_heap, e, bucket_alloc_cmp);
			if (heap_full(&ca->alloc_heap))
				break;
		}

		cond_resched();
	} while (ca->fifo_last_bucket != start);
}

static void find_reclaimable_buckets_random(struct bch_fs *c, struct bch_dev *ca)
{
	struct bucket_array *buckets = bucket_array(ca);
	struct bucket_mark m;
	size_t checked, i;

	for (checked = 0;
	     checked < ca->mi.nbuckets / 2;
	     checked++) {
		size_t b = bch2_rand_range(ca->mi.nbuckets -
					   ca->mi.first_bucket) +
			ca->mi.first_bucket;

		m = READ_ONCE(buckets->b[b].mark);

		if (bch2_can_invalidate_bucket(ca, b, m)) {
			struct alloc_heap_entry e = { .bucket = b, .nr = 1, };

			heap_add(&ca->alloc_heap, e, bucket_alloc_cmp);
			if (heap_full(&ca->alloc_heap))
				break;
		}

		cond_resched();
	}

	sort(ca->alloc_heap.data,
	     ca->alloc_heap.used,
	     sizeof(ca->alloc_heap.data[0]),
	     bucket_idx_cmp, NULL);

	/* remove duplicates: */
	for (i = 0; i + 1 < ca->alloc_heap.used; i++)
		if (ca->alloc_heap.data[i].bucket ==
		    ca->alloc_heap.data[i + 1].bucket)
			ca->alloc_heap.data[i].nr = 0;
}

static size_t find_reclaimable_buckets(struct bch_fs *c, struct bch_dev *ca)
{
	size_t i, nr = 0;

	ca->inc_gen_needs_gc = 0;

	switch (ca->mi.replacement) {
	case CACHE_REPLACEMENT_LRU:
		find_reclaimable_buckets_lru(c, ca);
		break;
	case CACHE_REPLACEMENT_FIFO:
		find_reclaimable_buckets_fifo(c, ca);
		break;
	case CACHE_REPLACEMENT_RANDOM:
		find_reclaimable_buckets_random(c, ca);
		break;
	}

	heap_resort(&ca->alloc_heap, bucket_alloc_cmp);

	for (i = 0; i < ca->alloc_heap.used; i++)
		nr += ca->alloc_heap.data[i].nr;

	return nr;
}

static inline long next_alloc_bucket(struct bch_dev *ca)
{
	struct alloc_heap_entry e, *top = ca->alloc_heap.data;

	while (ca->alloc_heap.used) {
		if (top->nr) {
			size_t b = top->bucket;

			top->bucket++;
			top->nr--;
			return b;
		}

		heap_pop(&ca->alloc_heap, e, bucket_alloc_cmp);
	}

	return -1;
}

static bool bch2_invalidate_one_bucket(struct bch_fs *c, struct bch_dev *ca,
				       size_t bucket, u64 *flush_seq)
{
	struct bucket_mark m;

	percpu_down_read(&c->usage_lock);
	spin_lock(&c->freelist_lock);

	bch2_invalidate_bucket(c, ca, bucket, &m);

	verify_not_on_freelist(c, ca, bucket);
	BUG_ON(!fifo_push(&ca->free_inc, bucket));

	spin_unlock(&c->freelist_lock);

	bucket_io_clock_reset(c, ca, bucket, READ);
	bucket_io_clock_reset(c, ca, bucket, WRITE);

	percpu_up_read(&c->usage_lock);

	if (m.journal_seq_valid) {
		u64 journal_seq = atomic64_read(&c->journal.seq);
		u64 bucket_seq	= journal_seq;

		bucket_seq &= ~((u64) U16_MAX);
		bucket_seq |= m.journal_seq;

		if (bucket_seq > journal_seq)
			bucket_seq -= 1 << 16;

		*flush_seq = max(*flush_seq, bucket_seq);
	}

	return m.cached_sectors != 0;
}

/*
 * Pull buckets off ca->alloc_heap, invalidate them, move them to ca->free_inc:
 */
static int bch2_invalidate_buckets(struct bch_fs *c, struct bch_dev *ca)
{
	struct btree_iter iter;
	u64 journal_seq = 0;
	int ret = 0;
	long b;

	bch2_btree_iter_init(&iter, c, BTREE_ID_ALLOC, POS(ca->dev_idx, 0),
			     BTREE_ITER_SLOTS|BTREE_ITER_INTENT);

	/* Only use nowait if we've already invalidated at least one bucket: */
	while (!ret &&
	       !fifo_full(&ca->free_inc) &&
	       (b = next_alloc_bucket(ca)) >= 0) {
		bool must_flush =
			bch2_invalidate_one_bucket(c, ca, b, &journal_seq);

		ret = __bch2_alloc_write_key(c, ca, b, &iter,
				must_flush ? &journal_seq : NULL,
				!fifo_empty(&ca->free_inc) ? BTREE_INSERT_NOWAIT : 0);
	}

	bch2_btree_iter_unlock(&iter);

	/* If we used NOWAIT, don't return the error: */
	if (!fifo_empty(&ca->free_inc))
		ret = 0;
	if (ret) {
		bch_err(ca, "error invalidating buckets: %i", ret);
		return ret;
	}

	if (journal_seq)
		ret = bch2_journal_flush_seq(&c->journal, journal_seq);
	if (ret) {
		bch_err(ca, "journal error: %i", ret);
		return ret;
	}

	return 0;
}

static int push_invalidated_bucket(struct bch_fs *c, struct bch_dev *ca, size_t bucket)
{
	unsigned i;
	int ret = 0;

	while (1) {
		set_current_state(TASK_INTERRUPTIBLE);

		spin_lock(&c->freelist_lock);
		for (i = 0; i < RESERVE_NR; i++)
			if (fifo_push(&ca->free[i], bucket)) {
				fifo_pop(&ca->free_inc, bucket);
				closure_wake_up(&c->freelist_wait);
				spin_unlock(&c->freelist_lock);
				goto out;
			}
		spin_unlock(&c->freelist_lock);

		if ((current->flags & PF_KTHREAD) &&
		    kthread_should_stop()) {
			ret = 1;
			break;
		}

		schedule();
		try_to_freeze();
	}
out:
	__set_current_state(TASK_RUNNING);
	return ret;
}

/*
 * Pulls buckets off free_inc, discards them (if enabled), then adds them to
 * freelists, waiting until there's room if necessary:
 */
static int discard_invalidated_buckets(struct bch_fs *c, struct bch_dev *ca)
{
	while (!fifo_empty(&ca->free_inc)) {
		size_t bucket = fifo_peek(&ca->free_inc);

		if (ca->mi.discard &&
		    bdev_max_discard_sectors(ca->disk_sb.bdev))
			blkdev_issue_discard(ca->disk_sb.bdev,
					     bucket_to_sector(ca, bucket),
					     ca->mi.bucket_size, GFP_NOIO);

		if (push_invalidated_bucket(c, ca, bucket))
			return 1;
	}

	return 0;
}

/**
 * bch2_allocator_thread - move buckets from free_inc to reserves
 *
 * free_inc is populated by invalidating the buckets that
 * find_reclaimable_buckets() collects, and the reserves are depleted by
 * bucket allocation. When we run out of free_inc, scan for more buckets to
 * invalidate and write out the new prios and gens.
 */
static int bch2_allocator_thread(void *arg)
{
	struct bch_dev *ca = arg;
	struct bch_fs *c = ca->fs;
	size_t nr;
	int ret;

	set_freezable();

	while (1) {
		cond_resched();

		pr_debug("discarding %zu invalidated buckets",
			 fifo_used(&ca->free_inc));

		ret = discard_invalidated_buckets(c, ca);
		if (ret)
			goto stop;

		ret = bch2_invalidate_buckets(c, ca);
		if (ret)
			goto stop;

		if (!fifo_empty(&ca->free_inc))
			continue;

		pr_debug("free_inc now empty");

		down_read(&c->gc_lock);
		do {
			if (test_bit(BCH_FS_GC_FAILURE, &c->flags)) {
				up_read(&c->gc_lock);
				bch_err(ca, "gc failure");
				goto stop;
			}

			/*
			 * Find some buckets that we can invalidate, either
			 * they're completely unused, or only contain clean data
			 * that's been written back to the backing device or
			 * another cache tier
			 */

			pr_debug("scanning for reclaimable buckets");

			nr = find_reclaimable_buckets(c, ca);

			pr_debug("found %zu buckets", nr);

			trace_alloc_batch(ca, nr, ca->alloc_heap.size);

			if ((ca->inc_gen_needs_gc >= ALLOC_SCAN_BATCH(ca) ||
			     ca->inc_gen_really_needs_gc) &&
			    c->gc_thread) {
				atomic_inc(&c->kick_gc);
				wake_up_process(c->gc_thread);
			}

			/*
			 * If we found any buckets, we have to invalidate them
			 * before we scan for more - but if we didn't find very
			 * many we may want to wait on more buckets being
			 * available so we don't spin:
			 */
			if (!nr ||
			    (nr < ALLOC_SCAN_BATCH(ca) &&
			     !fifo_full(&ca->free[RESERVE_MOVINGGC]))) {
				ca->allocator_blocked = true;
				closure_wake_up(&c->freelist_wait);

				ret = wait_buckets_available(c, ca);
				if (ret) {
					up_read(&c->gc_lock);
					goto stop;
				}
			}
		} while (!nr);

		ca->allocator_blocked = false;
		up_read(&c->gc_lock);

		pr_debug("%zu buckets to invalidate", nr);

		/*
		 * alloc_heap is now full of buckets ready to be invalidated:
		 * next trip through the loop we'll invalidate them and write
		 * out the new bucket gens:
		 */
	}

stop:
	pr_debug("alloc thread stopping (ret %i)", ret);
	return 0;
}

/* Allocation */

/*
 * Open buckets represent a bucket that's currently being allocated from. They
 * serve two purposes:
 *
 * - They track buckets that have been partially allocated, allowing for
 *   sub-bucket sized allocations - they're used by the sector allocator below
 *
 * - They provide a reference to the buckets they own that mark and sweep GC
 *   can find, until the new allocation has a pointer to it inserted into the
 *   btree
 *
 * When allocating some space with the sector allocator, the allocation comes
 * with a reference to an open bucket - the caller is required to put that
 * reference _after_ doing the index update that makes its allocation reachable.
 */
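
/*
 * Illustrative sketch of the resulting usage pattern (hypothetical caller;
 * the real write path lives elsewhere in the tree):
 *
 *	wp = bch2_alloc_sectors_start(c, ...);
 *	bch2_alloc_sectors_append_ptrs(c, wp, e, sectors);
 *	bch2_alloc_sectors_done(c, wp);
 *	// ...submit the data write, then do the btree index update...
 *	// ...and only then drop the open bucket reference(s) taken above,
 *	// e.g. via bch2_open_bucket_put().
 *
 * Putting the reference before the index update would leave a window where
 * mark and sweep GC can't see the allocation.
 */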

void __bch2_open_bucket_put(struct bch_fs *c, struct open_bucket *ob)
{
	struct bch_dev *ca = bch_dev_bkey_exists(c, ob->ptr.dev);

	percpu_down_read(&c->usage_lock);
	spin_lock(&ob->lock);

	bch2_mark_alloc_bucket(c, ca, PTR_BUCKET_NR(ca, &ob->ptr),
			       false, gc_pos_alloc(c, ob), 0);
	ob->valid = false;

	spin_unlock(&ob->lock);
	percpu_up_read(&c->usage_lock);

	spin_lock(&c->freelist_lock);
	ob->freelist = c->open_buckets_freelist;
	c->open_buckets_freelist = ob - c->open_buckets;
	c->open_buckets_nr_free++;
	spin_unlock(&c->freelist_lock);

	closure_wake_up(&c->open_buckets_wait);
}

static struct open_bucket *bch2_open_bucket_alloc(struct bch_fs *c)
{
	struct open_bucket *ob;

	BUG_ON(!c->open_buckets_freelist || !c->open_buckets_nr_free);

	ob = c->open_buckets + c->open_buckets_freelist;
	c->open_buckets_freelist = ob->freelist;
	atomic_set(&ob->pin, 1);

	c->open_buckets_nr_free--;
	return ob;
}

/* _only_ for allocating the journal on a new device: */
long bch2_bucket_alloc_new_fs(struct bch_dev *ca)
{
	struct bucket_array *buckets;
	ssize_t b;

	rcu_read_lock();
	buckets = bucket_array(ca);

	for (b = ca->mi.first_bucket; b < ca->mi.nbuckets; b++)
		if (is_available_bucket(buckets->b[b].mark))
			goto success;
	b = -1;
success:
	rcu_read_unlock();
	return b;
}

static inline unsigned open_buckets_reserved(enum alloc_reserve reserve)
{
	switch (reserve) {
	case RESERVE_ALLOC:
		return 0;
	case RESERVE_BTREE:
		return BTREE_NODE_RESERVE / 2;
	default:
		return BTREE_NODE_RESERVE;
	}
}

/**
 * bch2_bucket_alloc - allocate a single bucket from a specific device
 *
 * Returns the index of an open_bucket on success, or a negative error code
 * (FREELIST_EMPTY, OPEN_BUCKETS_EMPTY) on failure
 */
int bch2_bucket_alloc(struct bch_fs *c, struct bch_dev *ca,
		      enum alloc_reserve reserve,
		      bool may_alloc_partial,
		      struct closure *cl)
{
	struct bucket_array *buckets;
	struct open_bucket *ob;
	long bucket;

	spin_lock(&c->freelist_lock);

	if (may_alloc_partial &&
	    ca->open_buckets_partial_nr) {
		int ret = ca->open_buckets_partial[--ca->open_buckets_partial_nr];
		c->open_buckets[ret].on_partial_list = false;
		spin_unlock(&c->freelist_lock);
		return ret;
	}

	if (unlikely(c->open_buckets_nr_free <= open_buckets_reserved(reserve))) {
		if (cl)
			closure_wait(&c->open_buckets_wait, cl);
		spin_unlock(&c->freelist_lock);
		trace_open_bucket_alloc_fail(ca, reserve);
		return OPEN_BUCKETS_EMPTY;
	}

	if (likely(fifo_pop(&ca->free[RESERVE_NONE], bucket)))
		goto out;

	switch (reserve) {
	case RESERVE_ALLOC:
		if (fifo_pop(&ca->free[RESERVE_BTREE], bucket))
			goto out;
		break;
	case RESERVE_BTREE:
		if (fifo_used(&ca->free[RESERVE_BTREE]) * 2 >=
		    ca->free[RESERVE_BTREE].size &&
		    fifo_pop(&ca->free[RESERVE_BTREE], bucket))
			goto out;
		break;
	case RESERVE_MOVINGGC:
		if (fifo_pop(&ca->free[RESERVE_MOVINGGC], bucket))
			goto out;
		break;
	default:
		break;
	}

	if (cl)
		closure_wait(&c->freelist_wait, cl);

	spin_unlock(&c->freelist_lock);

	trace_bucket_alloc_fail(ca, reserve);
	return FREELIST_EMPTY;
out:
	verify_not_on_freelist(c, ca, bucket);

	ob = bch2_open_bucket_alloc(c);

	spin_lock(&ob->lock);
	buckets = bucket_array(ca);

	ob->valid = true;
	ob->sectors_free = ca->mi.bucket_size;
	ob->ptr = (struct bch_extent_ptr) {
		.gen	= buckets->b[bucket].mark.gen,
		.offset	= bucket_to_sector(ca, bucket),
		.dev	= ca->dev_idx,
	};

	bucket_io_clock_reset(c, ca, bucket, READ);
	bucket_io_clock_reset(c, ca, bucket, WRITE);
	spin_unlock(&ob->lock);

	spin_unlock(&c->freelist_lock);

	bch2_wake_allocator(ca);

	trace_bucket_alloc(ca, reserve);
	return ob - c->open_buckets;
}

static int __dev_alloc_cmp(struct write_point *wp,
			   unsigned l, unsigned r)
{
	return ((wp->next_alloc[l] > wp->next_alloc[r]) -
		(wp->next_alloc[l] < wp->next_alloc[r]));
}

#define dev_alloc_cmp(l, r) __dev_alloc_cmp(wp, l, r)

struct dev_alloc_list bch2_wp_alloc_list(struct bch_fs *c,
					 struct write_point *wp,
					 struct bch_devs_mask *devs)
{
	struct dev_alloc_list ret = { .nr = 0 };
	struct bch_dev *ca;
	unsigned i;

	for_each_member_device_rcu(ca, c, i, devs)
		ret.devs[ret.nr++] = i;

	bubble_sort(ret.devs, ret.nr, dev_alloc_cmp);
	return ret;
}

void bch2_wp_rescale(struct bch_fs *c, struct bch_dev *ca,
		     struct write_point *wp)
{
	u64 *v = wp->next_alloc + ca->dev_idx;
	u64 free_space = dev_buckets_free(c, ca);
	u64 free_space_inv = free_space
		? div64_u64(1ULL << 48, free_space)
		: 1ULL << 48;
	u64 scale = *v / 4;

	if (*v + free_space_inv >= *v)
		*v += free_space_inv;
	else
		*v = U64_MAX;

	for (v = wp->next_alloc;
	     v < wp->next_alloc + ARRAY_SIZE(wp->next_alloc); v++)
		*v = *v < scale ? 0 : *v - scale;
}
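
/*
 * Worked example (informal): devices are sorted by wp->next_alloc, smallest
 * first, and each time a device is chosen its counter grows by
 * 2^48/free_space. A device with twice the free buckets therefore
 * accumulates "debt" half as fast and receives roughly twice as many
 * allocations. E.g. with 1000 vs 500 free buckets the per-pick increments
 * are 2^48/1000 and 2^48/500; starting from equal counters, once the smaller
 * device is picked, the larger one is picked twice before the counters even
 * out again. The final loop above decays every counter by a quarter of the
 * chosen device's pre-update value, so stale history fades and the weights
 * track current free space.
 */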

static enum bucket_alloc_ret bch2_bucket_alloc_set(struct bch_fs *c,
					struct write_point *wp,
					unsigned nr_replicas,
					enum alloc_reserve reserve,
					struct bch_devs_mask *devs,
					struct closure *cl)
{
	enum bucket_alloc_ret ret = NO_DEVICES;
	struct dev_alloc_list devs_sorted;
	struct bch_dev *ca;
	unsigned i, nr_ptrs_effective = 0;
	bool have_cache_dev = false;

	BUG_ON(nr_replicas > ARRAY_SIZE(wp->ptrs));

	for (i = wp->first_ptr; i < wp->nr_ptrs; i++) {
		ca = bch_dev_bkey_exists(c, wp->ptrs[i]->ptr.dev);

		nr_ptrs_effective += ca->mi.durability;
		have_cache_dev |= !ca->mi.durability;
	}

	if (nr_ptrs_effective >= nr_replicas)
		return ALLOC_SUCCESS;

	devs_sorted = bch2_wp_alloc_list(c, wp, devs);

	for (i = 0; i < devs_sorted.nr; i++) {
		int ob;

		ca = rcu_dereference(c->devs[devs_sorted.devs[i]]);
		if (!ca)
			continue;

		if (!ca->mi.durability &&
		    (have_cache_dev ||
		     wp->type != BCH_DATA_USER))
			continue;

		ob = bch2_bucket_alloc(c, ca, reserve,
				       wp->type == BCH_DATA_USER, cl);
		if (ob < 0) {
			ret = ob;
			if (ret == OPEN_BUCKETS_EMPTY)
				break;
			continue;
		}

		BUG_ON(ob <= 0 || ob > U8_MAX);
		BUG_ON(wp->nr_ptrs >= ARRAY_SIZE(wp->ptrs));

		wp->ptrs[wp->nr_ptrs++] = c->open_buckets + ob;

		bch2_wp_rescale(c, ca, wp);

		nr_ptrs_effective += ca->mi.durability;
		have_cache_dev |= !ca->mi.durability;

		__clear_bit(ca->dev_idx, devs->d);

		if (nr_ptrs_effective >= nr_replicas) {
			ret = ALLOC_SUCCESS;
			break;
		}
	}

	EBUG_ON(reserve == RESERVE_MOVINGGC &&
		ret != ALLOC_SUCCESS &&
		ret != OPEN_BUCKETS_EMPTY);

	switch (ret) {
	case ALLOC_SUCCESS:
		return 0;
	case NO_DEVICES:
		return -EROFS;
	case FREELIST_EMPTY:
	case OPEN_BUCKETS_EMPTY:
		return cl ? -EAGAIN : -ENOSPC;
	default:
		BUG();
	}
}

/* Sector allocator */

static void writepoint_drop_ptr(struct bch_fs *c,
				struct write_point *wp,
				unsigned i)
{
	struct open_bucket *ob = wp->ptrs[i];
	struct bch_dev *ca = bch_dev_bkey_exists(c, ob->ptr.dev);

	BUG_ON(ca->open_buckets_partial_nr >=
	       ARRAY_SIZE(ca->open_buckets_partial));

	if (wp->type == BCH_DATA_USER) {
		spin_lock(&c->freelist_lock);
		ob->on_partial_list = true;
		ca->open_buckets_partial[ca->open_buckets_partial_nr++] =
			ob - c->open_buckets;
		spin_unlock(&c->freelist_lock);

		closure_wake_up(&c->open_buckets_wait);
		closure_wake_up(&c->freelist_wait);
	} else {
		bch2_open_bucket_put(c, ob);
	}

	array_remove_item(wp->ptrs, wp->nr_ptrs, i);

	if (i < wp->first_ptr)
		wp->first_ptr--;
}

static void writepoint_drop_ptrs(struct bch_fs *c,
				 struct write_point *wp,
				 u16 target, bool in_target)
{
	int i;

	for (i = wp->first_ptr - 1; i >= 0; --i)
		if (bch2_dev_in_target(c, wp->ptrs[i]->ptr.dev,
				       target) == in_target)
			writepoint_drop_ptr(c, wp, i);
}

static void verify_not_stale(struct bch_fs *c, const struct write_point *wp)
{
#ifdef CONFIG_BCACHEFS_DEBUG
	struct open_bucket *ob;
	unsigned i;

	writepoint_for_each_ptr_all(wp, ob, i) {
		struct bch_dev *ca = bch_dev_bkey_exists(c, ob->ptr.dev);

		BUG_ON(ptr_stale(ca, &ob->ptr));
	}
#endif
}

static int open_bucket_add_buckets(struct bch_fs *c,
				   u16 target,
				   struct write_point *wp,
				   struct bch_devs_list *devs_have,
				   unsigned nr_replicas,
				   enum alloc_reserve reserve,
				   struct closure *cl)
{
	struct bch_devs_mask devs = c->rw_devs[wp->type];
	const struct bch_devs_mask *t;
	struct open_bucket *ob;
	unsigned i;
	int ret;

	percpu_down_read(&c->usage_lock);
	rcu_read_lock();

	/* Don't allocate from devices we already have pointers to: */
	for (i = 0; i < devs_have->nr; i++)
		__clear_bit(devs_have->devs[i], devs.d);

	writepoint_for_each_ptr_all(wp, ob, i)
		__clear_bit(ob->ptr.dev, devs.d);

	t = bch2_target_to_mask(c, target);
	if (t)
		bitmap_and(devs.d, devs.d, t->d, BCH_SB_MEMBERS_MAX);

	ret = bch2_bucket_alloc_set(c, wp, nr_replicas, reserve, &devs, cl);

	rcu_read_unlock();
	percpu_up_read(&c->usage_lock);

	return ret;
}

static struct write_point *__writepoint_find(struct hlist_head *head,
					     unsigned long write_point)
{
	struct write_point *wp;

	hlist_for_each_entry_rcu(wp, head, node)
		if (wp->write_point == write_point)
			return wp;

	return NULL;
}

static struct hlist_head *writepoint_hash(struct bch_fs *c,
					  unsigned long write_point)
{
	unsigned hash =
		hash_long(write_point, ilog2(ARRAY_SIZE(c->write_points_hash)));

	return &c->write_points_hash[hash];
}

static struct write_point *writepoint_find(struct bch_fs *c,
					   unsigned long write_point)
{
	struct write_point *wp, *oldest;
	struct hlist_head *head;

	if (!(write_point & 1UL)) {
		wp = (struct write_point *) write_point;
		mutex_lock(&wp->lock);
		return wp;
	}

	head = writepoint_hash(c, write_point);
restart_find:
	wp = __writepoint_find(head, write_point);
	if (wp) {
lock_wp:
		mutex_lock(&wp->lock);
		if (wp->write_point == write_point)
			goto out;
		mutex_unlock(&wp->lock);
		goto restart_find;
	}

	oldest = NULL;
	for (wp = c->write_points;
	     wp < c->write_points + ARRAY_SIZE(c->write_points);
	     wp++)
		if (!oldest || time_before64(wp->last_used, oldest->last_used))
			oldest = wp;

	mutex_lock(&oldest->lock);
	mutex_lock(&c->write_points_hash_lock);
	wp = __writepoint_find(head, write_point);
	if (wp && wp != oldest) {
		mutex_unlock(&c->write_points_hash_lock);
		mutex_unlock(&oldest->lock);
		goto lock_wp;
	}

	wp = oldest;
	hlist_del_rcu(&wp->node);
	wp->write_point = write_point;
	hlist_add_head_rcu(&wp->node, head);
	mutex_unlock(&c->write_points_hash_lock);
out:
	wp->last_used = sched_clock();
	return wp;
}

/*
 * Get us an open_bucket we can allocate from, return with it locked:
 */
struct write_point *bch2_alloc_sectors_start(struct bch_fs *c,
				unsigned target,
				struct write_point_specifier write_point,
				struct bch_devs_list *devs_have,
				unsigned nr_replicas,
				unsigned nr_replicas_required,
				enum alloc_reserve reserve,
				unsigned flags,
				struct closure *cl)
{
	struct write_point *wp;
	struct open_bucket *ob;
	struct bch_dev *ca;
	unsigned nr_ptrs_have, nr_ptrs_effective;
	int ret, i, cache_idx = -1;

	BUG_ON(!nr_replicas || !nr_replicas_required);

	wp = writepoint_find(c, write_point.v);

	wp->first_ptr = 0;

	/* does writepoint have ptrs we can't use? */
	writepoint_for_each_ptr(wp, ob, i)
		if (bch2_dev_list_has_dev(*devs_have, ob->ptr.dev)) {
			swap(wp->ptrs[i], wp->ptrs[wp->first_ptr]);
			wp->first_ptr++;
		}

	nr_ptrs_have = wp->first_ptr;

	/* does writepoint have ptrs we don't want to use? */
	if (target)
		writepoint_for_each_ptr(wp, ob, i)
			if (!bch2_dev_in_target(c, ob->ptr.dev, target)) {
				swap(wp->ptrs[i], wp->ptrs[wp->first_ptr]);
				wp->first_ptr++;
			}

	if (flags & BCH_WRITE_ONLY_SPECIFIED_DEVS) {
		ret = open_bucket_add_buckets(c, target, wp, devs_have,
					      nr_replicas, reserve, cl);
	} else {
		ret = open_bucket_add_buckets(c, target, wp, devs_have,
					      nr_replicas, reserve, NULL);
		if (!ret)
			goto alloc_done;

		wp->first_ptr = nr_ptrs_have;

		ret = open_bucket_add_buckets(c, 0, wp, devs_have,
					      nr_replicas, reserve, cl);
	}

	if (ret && ret != -EROFS)
		goto err;
alloc_done:
	/* check for more than one cache: */
	for (i = wp->nr_ptrs - 1; i >= wp->first_ptr; --i) {
		ca = bch_dev_bkey_exists(c, wp->ptrs[i]->ptr.dev);

		if (ca->mi.durability)
			continue;

		/*
		 * if we ended up with more than one cache device, prefer the
		 * one in the target we want:
		 */
		if (cache_idx >= 0) {
			if (!bch2_dev_in_target(c, wp->ptrs[i]->ptr.dev,
						target)) {
				writepoint_drop_ptr(c, wp, i);
			} else {
				writepoint_drop_ptr(c, wp, cache_idx);
				cache_idx = i;
			}
		} else {
			cache_idx = i;
		}
	}

	/* we might have more effective replicas than required: */
	nr_ptrs_effective = 0;
	writepoint_for_each_ptr(wp, ob, i) {
		ca = bch_dev_bkey_exists(c, ob->ptr.dev);
		nr_ptrs_effective += ca->mi.durability;
	}

	if (ret == -EROFS &&
	    nr_ptrs_effective >= nr_replicas_required)
		ret = 0;

	if (ret)
		goto err;

	if (nr_ptrs_effective > nr_replicas) {
		writepoint_for_each_ptr(wp, ob, i) {
			ca = bch_dev_bkey_exists(c, ob->ptr.dev);

			if (ca->mi.durability &&
			    ca->mi.durability <= nr_ptrs_effective - nr_replicas &&
			    !bch2_dev_in_target(c, ob->ptr.dev, target)) {
				swap(wp->ptrs[i], wp->ptrs[wp->first_ptr]);
				wp->first_ptr++;
				nr_ptrs_effective -= ca->mi.durability;
			}
		}
	}

	if (nr_ptrs_effective > nr_replicas) {
		writepoint_for_each_ptr(wp, ob, i) {
			ca = bch_dev_bkey_exists(c, ob->ptr.dev);

			if (ca->mi.durability &&
			    ca->mi.durability <= nr_ptrs_effective - nr_replicas) {
				swap(wp->ptrs[i], wp->ptrs[wp->first_ptr]);
				wp->first_ptr++;
				nr_ptrs_effective -= ca->mi.durability;
			}
		}
	}

	/* Remove pointers we don't want to use: */
	if (target)
		writepoint_drop_ptrs(c, wp, target, false);

	BUG_ON(wp->first_ptr >= wp->nr_ptrs);
	BUG_ON(nr_ptrs_effective < nr_replicas_required);

	wp->sectors_free = UINT_MAX;

	writepoint_for_each_ptr(wp, ob, i)
		wp->sectors_free = min(wp->sectors_free, ob->sectors_free);

	BUG_ON(!wp->sectors_free || wp->sectors_free == UINT_MAX);

	verify_not_stale(c, wp);

	return wp;
err:
	mutex_unlock(&wp->lock);
	return ERR_PTR(ret);
}

/*
 * Append pointers to the space we just allocated to @e, and mark @sectors
 * space as allocated out of @wp's open buckets
 */
void bch2_alloc_sectors_append_ptrs(struct bch_fs *c, struct write_point *wp,
				    struct bkey_i_extent *e, unsigned sectors)
{
	struct open_bucket *ob;
	unsigned i;

	BUG_ON(sectors > wp->sectors_free);
	wp->sectors_free -= sectors;

	writepoint_for_each_ptr(wp, ob, i) {
		struct bch_dev *ca = bch_dev_bkey_exists(c, ob->ptr.dev);
		struct bch_extent_ptr tmp = ob->ptr;

		EBUG_ON(bch2_extent_has_device(extent_i_to_s_c(e), ob->ptr.dev));

		tmp.cached = bkey_extent_is_cached(&e->k) ||
			(!ca->mi.durability && wp->type == BCH_DATA_USER);

		tmp.offset += ca->mi.bucket_size - ob->sectors_free;
		extent_ptr_append(e, tmp);

		BUG_ON(sectors > ob->sectors_free);
		ob->sectors_free -= sectors;
	}
}
1683 | ||
1684 | /* | |
1685 | * Append pointers to the space we just allocated to @k, and mark @sectors space | |
1686 | * as allocated out of @ob | |
1687 | */ | |
1688 | void bch2_alloc_sectors_done(struct bch_fs *c, struct write_point *wp) | |
1689 | { | |
1690 | int i; | |
1691 | ||
1692 | for (i = wp->nr_ptrs - 1; i >= 0; --i) { | |
1693 | struct open_bucket *ob = wp->ptrs[i]; | |
1694 | ||
1695 | if (!ob->sectors_free) { | |
1696 | array_remove_item(wp->ptrs, wp->nr_ptrs, i); | |
1697 | bch2_open_bucket_put(c, ob); | |
1698 | } | |
1699 | } | |
1700 | ||
1701 | mutex_unlock(&wp->lock); | |
1702 | } | |
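/*
 * A rough sketch of how a writer drives these helpers, with
 * bch2_alloc_sectors_start() being the entry point above (argument list
 * abridged here; error handling and the actual IO submission omitted):
 *
 *	wp = bch2_alloc_sectors_start(c, ...);
 *	if (IS_ERR(wp))
 *		return PTR_ERR(wp);
 *
 *	sectors = min(sectors_wanted, wp->sectors_free);
 *	bch2_alloc_sectors_append_ptrs(c, wp, e, sectors);
 *
 *	... submit the write described by @e ...
 *
 *	bch2_alloc_sectors_done(c, wp);
 */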
1703 | ||
1704 | /* Startup/shutdown (ro/rw): */ | |
1705 | ||
1706 | void bch2_recalc_capacity(struct bch_fs *c) | |
1707 | { | |
1708 | struct bch_dev *ca; | |
1709 | u64 total_capacity, capacity = 0, reserved_sectors = 0; | |
1710 | unsigned long ra_pages = 0; | |
1711 | unsigned i, j; | |
1712 | ||
1713 | lockdep_assert_held(&c->state_lock); | |
1714 | ||
1715 | for_each_online_member(ca, c, i) { | |
1716 | struct backing_dev_info *bdi = ca->disk_sb.bdev->bd_disk->bdi; | |
1717 | ||
1718 | ra_pages += bdi->ra_pages; | |
1719 | } | |
1720 | ||
1721 | bch2_set_ra_pages(c, ra_pages); | |
1722 | ||
1723 | for_each_rw_member(ca, c, i) { | |
1724 | size_t reserve = 0; | |
1725 | ||
1726 | /* | |
1727 | * We need to reserve buckets (from the number | |
1728 | * of currently available buckets) against | |
1729 | * foreground writes, mainly so that copygc can | |
1730 | * make forward progress. | |
1731 | * | |
1732 | * We need enough to refill the various reserves | |
1733 | * from scratch - copygc will use its entire | |
1734 | * reserve all at once, then run again once | |
1735 | * its reserve has been refilled (from the formerly | |
1736 | * available buckets). | |
1737 | * | |
1738 | * This reserve is just used when considering if | |
1739 | * allocations for foreground writes must wait - | |
1740 | * not -ENOSPC calculations. | |
1741 | */ | |
1742 | for (j = 0; j < RESERVE_NONE; j++) | |
1743 | reserve += ca->free[j].size; | |
1744 | ||
1745 | reserve += ca->free_inc.size; | |
1746 | ||
1747 | reserve += ARRAY_SIZE(c->write_points); | |
1748 | ||
1749 | reserve += 1; /* btree write point */ | |
1750 | ||
1751 | reserved_sectors += bucket_to_sector(ca, reserve); | |
1752 | ||
1753 | capacity += bucket_to_sector(ca, ca->mi.nbuckets - | |
1754 | ca->mi.first_bucket); | |
1755 | } | |
1756 | ||
1757 | total_capacity = capacity; | |
1758 | ||
1759 | capacity *= (100 - c->opts.gc_reserve_percent); | |
1760 | capacity = div64_u64(capacity, 100); | |
1761 | ||
1762 | BUG_ON(reserved_sectors > total_capacity); | |
1763 | ||
1764 | capacity = min(capacity, total_capacity - reserved_sectors); | |
1765 | ||
1766 | c->capacity = capacity; | |
1767 | ||
1768 | if (c->capacity) { | |
1769 | bch2_io_timer_add(&c->io_clock[READ], | |
1770 | &c->bucket_clock[READ].rescale); | |
1771 | bch2_io_timer_add(&c->io_clock[WRITE], | |
1772 | &c->bucket_clock[WRITE].rescale); | |
1773 | } else { | |
1774 | bch2_io_timer_del(&c->io_clock[READ], | |
1775 | &c->bucket_clock[READ].rescale); | |
1776 | bch2_io_timer_del(&c->io_clock[WRITE], | |
1777 | &c->bucket_clock[WRITE].rescale); | |
1778 | } | |
1779 | ||
1780 | /* Wake up in case someone was waiting for buckets */ | |
1781 | closure_wake_up(&c->freelist_wait); | |
1782 | } | |
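/*
 * Worked example with made-up numbers: a single rw device with
 * nbuckets = 1000, first_bucket = 8, bucket_size = 1024 sectors, and
 * gc_reserve_percent = 20:
 *
 *	total_capacity	= (1000 - 8) * 1024          = 1015808 sectors
 *	capacity	= 1015808 * (100 - 20) / 100 = 812646 sectors
 *
 * If the freelists and write points add up to a reserve of 70 buckets,
 * reserved_sectors = 71680; since 812646 <= 1015808 - 71680, the gc
 * reserve is what limits c->capacity in this case.
 */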
1783 | ||
1784 | static void bch2_stop_write_point(struct bch_fs *c, struct bch_dev *ca, | |
1785 | struct write_point *wp) | |
1786 | { | |
1791 | mutex_lock(&wp->lock); | |
1792 | wp->first_ptr = wp->nr_ptrs; | |
1793 | writepoint_drop_ptrs(c, wp, dev_to_target(ca->dev_idx), true); | |
1794 | mutex_unlock(&wp->lock); | |
1795 | } | |
1796 | ||
1797 | static bool bch2_dev_has_open_write_point(struct bch_fs *c, struct bch_dev *ca) | |
1798 | { | |
1799 | struct open_bucket *ob; | |
1800 | bool ret = false; | |
1801 | ||
1802 | for (ob = c->open_buckets; | |
1803 | ob < c->open_buckets + ARRAY_SIZE(c->open_buckets); | |
1804 | ob++) { | |
1805 | spin_lock(&ob->lock); | |
1806 | if (ob->valid && !ob->on_partial_list && | |
1807 | ob->ptr.dev == ca->dev_idx) | |
1808 | ret = true; | |
1809 | spin_unlock(&ob->lock); | |
1810 | } | |
1811 | ||
1812 | return ret; | |
1813 | } | |
1814 | ||
1815 | /* device goes ro: */ | |
1816 | void bch2_dev_allocator_remove(struct bch_fs *c, struct bch_dev *ca) | |
1817 | { | |
1818 | unsigned i; | |
1819 | ||
1820 | BUG_ON(ca->alloc_thread); | |
1821 | ||
1822 | /* First, remove device from allocation groups: */ | |
1823 | ||
1824 | for (i = 0; i < ARRAY_SIZE(c->rw_devs); i++) | |
1825 | clear_bit(ca->dev_idx, c->rw_devs[i].d); | |
1826 | ||
1827 | /* | |
1828 | * Capacity is calculated based on the devices in allocation groups: | |
1829 | */ | |
1830 | bch2_recalc_capacity(c); | |
1831 | ||
1832 | /* Next, close write points that point to this device... */ | |
1833 | for (i = 0; i < ARRAY_SIZE(c->write_points); i++) | |
1834 | bch2_stop_write_point(c, ca, &c->write_points[i]); | |
1835 | ||
1836 | bch2_stop_write_point(c, ca, &ca->copygc_write_point); | |
1837 | bch2_stop_write_point(c, ca, &c->rebalance_write_point); | |
1838 | bch2_stop_write_point(c, ca, &c->btree_write_point); | |
1839 | ||
1840 | mutex_lock(&c->btree_reserve_cache_lock); | |
1841 | while (c->btree_reserve_cache_nr) { | |
1842 | struct btree_alloc *a = | |
1843 | &c->btree_reserve_cache[--c->btree_reserve_cache_nr]; | |
1844 | ||
1845 | bch2_open_bucket_put_refs(c, &a->ob.nr, a->ob.refs); | |
1846 | } | |
1847 | mutex_unlock(&c->btree_reserve_cache_lock); | |
1848 | ||
1849 | /* | |
1850 | * Wake up threads that were blocked on allocation, so they can notice | |
1851 | * the device can no longer be removed and the capacity has changed: | |
1852 | */ | |
1853 | closure_wake_up(&c->freelist_wait); | |
1854 | ||
1855 | /* | |
1856 | * journal_res_get() can block waiting for free space in the journal - | |
1857 | * it needs to notice there may not be devices to allocate from anymore: | |
1858 | */ | |
1859 | wake_up(&c->journal.wait); | |
1860 | ||
1861 | /* Now wait for any in flight writes: */ | |
1862 | ||
1863 | closure_wait_event(&c->open_buckets_wait, | |
1864 | !bch2_dev_has_open_write_point(c, ca)); | |
1865 | } | |
1866 | ||
1867 | /* device goes rw: */ | |
1868 | void bch2_dev_allocator_add(struct bch_fs *c, struct bch_dev *ca) | |
1869 | { | |
1870 | unsigned i; | |
1871 | ||
1872 | for (i = 0; i < ARRAY_SIZE(c->rw_devs); i++) | |
1873 | if (ca->mi.data_allowed & (1 << i)) | |
1874 | set_bit(ca->dev_idx, c->rw_devs[i].d); | |
1875 | } | |
1876 | ||
1877 | /* stop allocator thread: */ | |
1878 | void bch2_dev_allocator_stop(struct bch_dev *ca) | |
1879 | { | |
1880 | struct task_struct *p; | |
1881 | ||
1882 | p = rcu_dereference_protected(ca->alloc_thread, 1); | |
1883 | ca->alloc_thread = NULL; | |
1884 | ||
1885 | /* | |
1886 | * We need an rcu barrier between setting ca->alloc_thread = NULL and | |
1887 | * the thread shutting down to avoid bch2_wake_allocator() racing: | |
1888 | * | |
1889 | * XXX: it would be better to have the rcu barrier be asynchronous | |
1890 | * instead of blocking us here | |
1891 | */ | |
1892 | synchronize_rcu(); | |
1893 | ||
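/*
 * Concretely, the reader this pairs with is bch2_wake_allocator() (not
 * part of this excerpt); a rough sketch of the pattern, assuming it
 * follows the standard RCU form:
 *
 *	rcu_read_lock();
 *	p = rcu_dereference(ca->alloc_thread);
 *	if (p)
 *		wake_up_process(p);
 *	rcu_read_unlock();
 *
 * Once synchronize_rcu() has returned, no reader can still be using a
 * task pointer loaded before alloc_thread was cleared, so the
 * kthread_stop()/put_task_struct() below can't race with a wakeup.
 */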
1894 | if (p) { | |
1895 | kthread_stop(p); | |
1896 | put_task_struct(p); | |
1897 | } | |
1898 | } | |
1899 | ||
1900 | /* start allocator thread: */ | |
1901 | int bch2_dev_allocator_start(struct bch_dev *ca) | |
1902 | { | |
1903 | struct task_struct *p; | |
1904 | ||
1905 | /* | |
1906 | * allocator thread already started? | |
1907 | */ | |
1908 | if (ca->alloc_thread) | |
1909 | return 0; | |
1910 | ||
1911 | p = kthread_create(bch2_allocator_thread, ca, | |
1912 | "bch_alloc[%s]", ca->name); | |
1913 | if (IS_ERR(p)) | |
1914 | return PTR_ERR(p); | |
1915 | ||
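/*
 * Take a reference on the task so that kthread_stop() in
 * bch2_dev_allocator_stop() remains safe even if the thread exits on its
 * own first:
 */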
1916 | get_task_struct(p); | |
1917 | rcu_assign_pointer(ca->alloc_thread, p); | |
1918 | wake_up_process(p); | |
1919 | return 0; | |
1920 | } | |
1921 | ||
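/*
 * Write out btree nodes that were dirtied while BCH_FS_HOLD_BTREE_WRITES
 * was set. The scan restarts from the top after every write because the
 * RCU read lock protecting the node hash walk must be dropped before a
 * node lock can be taken:
 */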
b29e197a KO |
1922 | static void flush_held_btree_writes(struct bch_fs *c) |
1923 | { | |
1924 | struct bucket_table *tbl; | |
1925 | struct rhash_head *pos; | |
1926 | struct btree *b; | |
1927 | bool flush_updates; | |
1928 | size_t i, nr_pending_updates; | |
1929 | ||
1930 | clear_bit(BCH_FS_HOLD_BTREE_WRITES, &c->flags); | |
1931 | again: | |
1932 | pr_debug("flushing dirty btree nodes"); | |
1933 | cond_resched(); | |
1934 | ||
1935 | flush_updates = false; | |
1936 | nr_pending_updates = bch2_btree_interior_updates_nr_pending(c); | |
1937 | ||
1938 | rcu_read_lock(); | |
1939 | for_each_cached_btree(b, c, tbl, i, pos) | |
1940 | if (btree_node_dirty(b) && (!b->written || b->level)) { | |
1941 | if (btree_node_may_write(b)) { | |
1942 | rcu_read_unlock(); | |
1943 | btree_node_lock_type(c, b, SIX_LOCK_read); | |
1944 | bch2_btree_node_write(c, b, SIX_LOCK_read); | |
1945 | six_unlock_read(&b->lock); | |
1946 | goto again; | |
1947 | } else { | |
1948 | flush_updates = true; | |
1949 | } | |
1950 | } | |
1951 | rcu_read_unlock(); | |
1952 | ||
1953 | if (c->btree_roots_dirty) | |
1954 | bch2_journal_meta(&c->journal); | |
1955 | ||
1956 | /* | |
1957 | * This is ugly, but it's needed to flush btree node writes | |
1958 | * without spinning... | |
1959 | */ | |
1960 | if (flush_updates) { | |
1961 | closure_wait_event(&c->btree_interior_update_wait, | |
1962 | bch2_btree_interior_updates_nr_pending(c) < | |
1963 | nr_pending_updates); | |
1964 | goto again; | |
1965 | } | |
1967 | } | |
1968 | ||
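/*
 * Pop every bucket off each rw device's free_inc and discard it; GFP_NOIO
 * because this runs during startup, where recursing from memory reclaim
 * into more IO could deadlock:
 */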
1c6fdbd8 KO |
1969 | static void allocator_start_issue_discards(struct bch_fs *c) |
1970 | { | |
1971 | struct bch_dev *ca; | |
1972 | unsigned dev_iter; | |
b29e197a | 1973 | size_t bu; |
1c6fdbd8 | 1974 | |
b29e197a KO |
1975 | for_each_rw_member(ca, c, dev_iter) |
1976 | while (fifo_pop(&ca->free_inc, bu)) | |
1c6fdbd8 KO |
1977 | blkdev_issue_discard(ca->disk_sb.bdev, |
1978 | bucket_to_sector(ca, bu), | |
1979 | ca->mi.bucket_size, GFP_NOIO); | |
1c6fdbd8 KO |
1980 | } |
1981 | ||
1982 | static int __bch2_fs_allocator_start(struct bch_fs *c) | |
1983 | { | |
1984 | struct bch_dev *ca; | |
1c6fdbd8 KO |
1985 | unsigned dev_iter; |
1986 | u64 journal_seq = 0; | |
b29e197a | 1987 | long bu; |
1c6fdbd8 KO |
1988 | bool invalidating_data = false; |
1989 | int ret = 0; | |
1990 | ||
1991 | if (test_bit(BCH_FS_GC_FAILURE, &c->flags)) | |
1992 | return -1; | |
1993 | ||
b29e197a KO |
1994 | if (test_alloc_startup(c)) { |
1995 | invalidating_data = true; | |
1996 | goto not_enough; | |
1997 | } | |
1998 | ||
1c6fdbd8 KO |
1999 | /* Scan for buckets that are already invalidated: */ |
2000 | for_each_rw_member(ca, c, dev_iter) { | |
2001 | struct btree_iter iter; | |
2002 | struct bucket_mark m; | |
2003 | struct bkey_s_c k; | |
2004 | ||
2005 | for_each_btree_key(&iter, c, BTREE_ID_ALLOC, POS(ca->dev_idx, 0), 0, k) { | |
2006 | if (k.k->type != BCH_ALLOC) | |
2007 | continue; | |
2008 | ||
2009 | bu = k.k->p.offset; | |
2010 | m = READ_ONCE(bucket(ca, bu)->mark); | |
2011 | ||
2012 | if (!is_available_bucket(m) || m.cached_sectors) | |
2013 | continue; | |
2014 | ||
2015 | percpu_down_read(&c->usage_lock); | |
2016 | bch2_mark_alloc_bucket(c, ca, bu, true, | |
2017 | gc_pos_alloc(c, NULL), | |
2018 | BCH_BUCKET_MARK_MAY_MAKE_UNAVAILABLE| | |
2019 | BCH_BUCKET_MARK_GC_LOCK_HELD); | |
2020 | percpu_up_read(&c->usage_lock); | |
2021 | ||
2022 | fifo_push(&ca->free_inc, bu); | |
1c6fdbd8 KO |
2023 | |
2024 | if (fifo_full(&ca->free_inc)) | |
2025 | break; | |
2026 | } | |
2027 | bch2_btree_iter_unlock(&iter); | |
2028 | } | |
2029 | ||
2030 | /* did we find enough buckets? */ | |
2031 | for_each_rw_member(ca, c, dev_iter) | |
2032 | if (fifo_used(&ca->free_inc) < ca->free[RESERVE_BTREE].size) { | |
2033 | percpu_ref_put(&ca->io_ref); | |
2034 | goto not_enough; | |
2035 | } | |
2036 | ||
2037 | return 0; | |
2038 | not_enough: | |
2039 | pr_debug("did not find enough empty buckets; issuing discards"); | |
2040 | ||
b29e197a | 2041 | /* clear out free_inc, we'll be using it again below: */ |
1c6fdbd8 KO |
2042 | for_each_rw_member(ca, c, dev_iter) |
2043 | discard_invalidated_buckets(c, ca); | |
2044 | ||
2045 | pr_debug("scanning for reclaimable buckets"); | |
2046 | ||
2047 | for_each_rw_member(ca, c, dev_iter) { | |
1c6fdbd8 | 2048 | find_reclaimable_buckets(c, ca); |
1c6fdbd8 | 2049 | |
b29e197a KO |
2050 | while (!fifo_full(&ca->free[RESERVE_BTREE]) && |
2051 | (bu = next_alloc_bucket(ca)) >= 0) { | |
2052 | invalidating_data |= | |
2053 | bch2_invalidate_one_bucket(c, ca, bu, &journal_seq); | |
1c6fdbd8 | 2054 | |
b29e197a KO |
2055 | fifo_push(&ca->free[RESERVE_BTREE], bu); |
2056 | set_bit(bu, ca->buckets_dirty); | |
2057 | } | |
1c6fdbd8 KO |
2058 | } |
2059 | ||
2060 | pr_debug("done scanning for reclaimable buckets"); | |
2061 | ||
2062 | /* | |
2063 | * We're moving buckets to freelists _before_ they've been marked as | |
2064 | * invalidated on disk - we have to, so that we can allocate new btree | |
2065 | * nodes to mark them as invalidated on disk. | |
2066 | * | |
2067 | * However, we can't _write_ to any of these buckets yet - they might | |
2068 | * have cached data in them, which is live until they're marked as | |
2069 | * invalidated on disk: | |
2070 | */ | |
2071 | if (invalidating_data) { | |
277c981c KO |
2073 | pr_info("holding writes"); | |
1c6fdbd8 KO |
2074 | pr_debug("invalidating existing data"); |
2075 | set_bit(BCH_FS_HOLD_BTREE_WRITES, &c->flags); | |
2076 | } else { | |
2077 | pr_debug("issuing discards"); | |
2078 | allocator_start_issue_discards(c); | |
2079 | } | |
2080 | ||
2081 | /* | |
2082 | * XXX: it's possible for this to deadlock waiting on journal reclaim, | |
2083 | * since we're holding btree writes. What then? | |
2084 | */ | |
b29e197a KO |
2085 | ret = bch2_alloc_write(c); |
2086 | if (ret) | |
2087 | return ret; | |
1c6fdbd8 KO |
2088 | |
2089 | if (invalidating_data) { | |
2090 | pr_debug("flushing journal"); | |
2091 | ||
2092 | ret = bch2_journal_flush_seq(&c->journal, journal_seq); | |
2093 | if (ret) | |
2094 | return ret; | |
2095 | ||
2096 | pr_debug("issuing discards"); | |
2097 | allocator_start_issue_discards(c); | |
2098 | } | |
2099 | ||
1c6fdbd8 KO |
2100 | set_bit(BCH_FS_ALLOCATOR_STARTED, &c->flags); |
2101 | ||
2102 | /* now flush dirty btree nodes: */ | |
b29e197a KO |
2103 | if (invalidating_data) |
2104 | flush_held_btree_writes(c); | |
1c6fdbd8 KO |
2105 | |
2106 | return 0; | |
2107 | } | |
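/*
 * To recap the cold-start sequence above: reuse buckets that are already
 * invalidated on disk if every device has enough of them; otherwise
 * invalidate fresh buckets (holding btree writes if live data had to be
 * invalidated), persist the new alloc info, flush the journal, issue
 * discards, and only then let the held btree writes go through.
 */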
2108 | ||
2109 | int bch2_fs_allocator_start(struct bch_fs *c) | |
2110 | { | |
2111 | struct bch_dev *ca; | |
2112 | unsigned i; | |
2113 | int ret; | |
2114 | ||
2115 | down_read(&c->gc_lock); | |
2116 | ret = __bch2_fs_allocator_start(c); | |
2117 | up_read(&c->gc_lock); | |
2118 | ||
2119 | if (ret) | |
2120 | return ret; | |
2121 | ||
2122 | for_each_rw_member(ca, c, i) { | |
2123 | ret = bch2_dev_allocator_start(ca); | |
2124 | if (ret) { | |
2125 | percpu_ref_put(&ca->io_ref); | |
2126 | return ret; | |
2127 | } | |
2128 | } | |
2129 | ||
2130 | return bch2_alloc_write(c); | |
2131 | } | |
2132 | ||
2133 | void bch2_fs_allocator_init(struct bch_fs *c) | |
2134 | { | |
2135 | struct open_bucket *ob; | |
2136 | struct write_point *wp; | |
2137 | ||
2138 | mutex_init(&c->write_points_hash_lock); | |
2139 | spin_lock_init(&c->freelist_lock); | |
2140 | bch2_bucket_clock_init(c, READ); | |
2141 | bch2_bucket_clock_init(c, WRITE); | |
2142 | ||
2143 | /* open bucket 0 is a sentinel NULL: */ | |
2144 | spin_lock_init(&c->open_buckets[0].lock); | |
2145 | ||
2146 | for (ob = c->open_buckets + 1; | |
2147 | ob < c->open_buckets + ARRAY_SIZE(c->open_buckets); ob++) { | |
2148 | spin_lock_init(&ob->lock); | |
2149 | c->open_buckets_nr_free++; | |
2150 | ||
2151 | ob->freelist = c->open_buckets_freelist; | |
2152 | c->open_buckets_freelist = ob - c->open_buckets; | |
2153 | } | |
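/*
 * Open buckets are chained through the array by index rather than by
 * pointer, with index 0 reserved as the NULL sentinel - hence the loop
 * above starting at open_buckets + 1. A sketch of the matching pop (the
 * real allocation path, with locking, lives earlier in this file):
 *
 *	ob = c->open_buckets + c->open_buckets_freelist;
 *	c->open_buckets_freelist = ob->freelist;
 *	c->open_buckets_nr_free--;
 */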
2154 | ||
2155 | writepoint_init(&c->btree_write_point, BCH_DATA_BTREE); | |
2156 | writepoint_init(&c->rebalance_write_point, BCH_DATA_USER); | |
2157 | ||
2158 | for (wp = c->write_points; | |
2159 | wp < c->write_points + ARRAY_SIZE(c->write_points); wp++) { | |
2160 | writepoint_init(wp, BCH_DATA_USER); | |
2161 | ||
2162 | wp->last_used = sched_clock(); | |
2163 | wp->write_point = (unsigned long) wp; | |
2164 | hlist_add_head_rcu(&wp->node, writepoint_hash(c, wp->write_point)); | |
2165 | } | |
2166 | ||
2167 | c->pd_controllers_update_seconds = 5; | |
2168 | INIT_DELAYED_WORK(&c->pd_controllers_update, pd_controllers_update); | |
2169 | } |