fs/bcachefs/buckets.c
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Code for manipulating bucket marks for garbage collection.
4  *
5  * Copyright 2014 Datera, Inc.
6  *
7  * Bucket states:
8  * - free bucket: mark == 0
9  *   The bucket contains no data and will not be read
10  *
11  * - allocator bucket: owned_by_allocator == 1
12  *   The bucket is on a free list, or it is an open bucket
13  *
14  * - cached bucket: owned_by_allocator == 0 &&
15  *                  dirty_sectors == 0 &&
16  *                  cached_sectors > 0
17  *   The bucket contains data but may be safely discarded as there are
18  *   enough replicas of the data on other cache devices, or it has been
19  *   written back to the backing device
20  *
21  * - dirty bucket: owned_by_allocator == 0 &&
22  *                 dirty_sectors > 0
23  * The bucket contains data that we must not discard (either the only copy,
24  *   or one of the 'main copies' for data requiring multiple replicas)
25  *
26  * - metadata bucket: owned_by_allocator == 0 && is_metadata == 1
27  *   This is a btree node, journal or gen/prio bucket
28  *
29  * Lifecycle:
30  *
31  * bucket invalidated => bucket on freelist => open bucket =>
32  *     [dirty bucket =>] cached bucket => bucket invalidated => ...
33  *
34  * Note that cache promotion can skip the dirty bucket step, as data
35  * is copied from a deeper tier to a shallower tier, onto a cached
36  * bucket.
37  * Note also that a cached bucket can spontaneously become dirty --
38  * see below.
39  *
40  * Only a traversal of the key space can determine whether a bucket is
41  * truly dirty or cached.
42  *
43  * Transitions:
44  *
45  * - free => allocator: bucket was invalidated
46  * - cached => allocator: bucket was invalidated
47  *
48  * - allocator => dirty: open bucket was filled up
49  * - allocator => cached: open bucket was filled up
50  * - allocator => metadata: metadata was allocated
51  *
52  * - dirty => cached: dirty sectors were copied to a deeper tier
53  * - dirty => free: dirty sectors were overwritten or moved (copy gc)
54  * - cached => free: cached sectors were overwritten
55  *
56  * - metadata => free: metadata was freed
57  *
58  * Oddities:
59  * - cached => dirty: a device was removed so formerly replicated data
60  *                    is no longer sufficiently replicated
61  * - free => cached: cannot happen
62  * - free => dirty: cannot happen
63  * - free => metadata: cannot happen
64  */
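/*
 * As a rough illustration of the states above, in terms of the bucket_mark
 * fields used throughout this file (a minimal sketch; bucket_state() is a
 * hypothetical helper, not something defined elsewhere):
 *
 *	static const char *bucket_state(struct bucket_mark m)
 *	{
 *		if (!m.v.counter)
 *			return "free";
 *		if (m.owned_by_allocator)
 *			return "allocator";
 *		if (m.data_type == BCH_DATA_SB ||
 *		    m.data_type == BCH_DATA_JOURNAL ||
 *		    m.data_type == BCH_DATA_BTREE)
 *			return "metadata";
 *		if (m.dirty_sectors)
 *			return "dirty";
 *		if (m.cached_sectors)
 *			return "cached";
 *		return "empty";
 *	}
 */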
65
66 #include "bcachefs.h"
67 #include "alloc_background.h"
68 #include "bset.h"
69 #include "btree_gc.h"
70 #include "btree_update.h"
71 #include "buckets.h"
72 #include "ec.h"
73 #include "error.h"
74 #include "movinggc.h"
75 #include "replicas.h"
76 #include "trace.h"
77
78 #include <linux/preempt.h>
79
80 /*
81  * Clear journal_seq_valid for buckets for which it's not needed, to prevent
82  * wraparound:
83  */
84 void bch2_bucket_seq_cleanup(struct bch_fs *c)
85 {
86         u64 journal_seq = atomic64_read(&c->journal.seq);
87         u16 last_seq_ondisk = c->journal.last_seq_ondisk;
88         struct bch_dev *ca;
89         struct bucket_array *buckets;
90         struct bucket *g;
91         struct bucket_mark m;
92         unsigned i;
93
94         if (journal_seq - c->last_bucket_seq_cleanup <
95             (1U << (BUCKET_JOURNAL_SEQ_BITS - 2)))
96                 return;
97
98         c->last_bucket_seq_cleanup = journal_seq;
99
100         for_each_member_device(ca, c, i) {
101                 down_read(&ca->bucket_lock);
102                 buckets = bucket_array(ca);
103
104                 for_each_bucket(g, buckets) {
105                         bucket_cmpxchg(g, m, ({
106                                 if (!m.journal_seq_valid ||
107                                     bucket_needs_journal_commit(m, last_seq_ondisk))
108                                         break;
109
110                                 m.journal_seq_valid = 0;
111                         }));
112                 }
113                 up_read(&ca->bucket_lock);
114         }
115 }
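/*
 * (A note on the interval above: the bucket mark only carries
 * BUCKET_JOURNAL_SEQ_BITS of the journal sequence, so running this cleanup at
 * least once every 2^(BUCKET_JOURNAL_SEQ_BITS - 2) sequence numbers clears
 * journal_seq_valid well before the truncated value can wrap and become
 * ambiguous.)
 */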
116
117 void bch2_fs_usage_initialize(struct bch_fs *c)
118 {
119         struct bch_fs_usage *usage;
120         unsigned i;
121
122         percpu_down_write(&c->mark_lock);
123         usage = c->usage_base;
124
125         for (i = 0; i < ARRAY_SIZE(c->usage); i++)
126                 bch2_fs_usage_acc_to_base(c, i);
127
128         for (i = 0; i < BCH_REPLICAS_MAX; i++)
129                 usage->reserved += usage->persistent_reserved[i];
130
131         for (i = 0; i < c->replicas.nr; i++) {
132                 struct bch_replicas_entry *e =
133                         cpu_replicas_entry(&c->replicas, i);
134
135                 switch (e->data_type) {
136                 case BCH_DATA_BTREE:
137                         usage->btree    += usage->replicas[i];
138                         break;
139                 case BCH_DATA_USER:
140                         usage->data     += usage->replicas[i];
141                         break;
142                 case BCH_DATA_CACHED:
143                         usage->cached   += usage->replicas[i];
144                         break;
145                 }
146         }
147
148         percpu_up_write(&c->mark_lock);
149 }
150
151 void bch2_fs_usage_scratch_put(struct bch_fs *c, struct bch_fs_usage_online *fs_usage)
152 {
153         if (fs_usage == c->usage_scratch)
154                 mutex_unlock(&c->usage_scratch_lock);
155         else
156                 kfree(fs_usage);
157 }
158
159 struct bch_fs_usage_online *bch2_fs_usage_scratch_get(struct bch_fs *c)
160 {
161         struct bch_fs_usage_online *ret;
162         unsigned bytes = sizeof(struct bch_fs_usage_online) + sizeof(u64) *
163                 READ_ONCE(c->replicas.nr);
164         ret = kzalloc(bytes, GFP_NOWAIT|__GFP_NOWARN);
165         if (ret)
166                 return ret;
167
168         if (mutex_trylock(&c->usage_scratch_lock))
169                 goto out_pool;
170
171         ret = kzalloc(bytes, GFP_NOFS);
172         if (ret)
173                 return ret;
174
175         mutex_lock(&c->usage_scratch_lock);
176 out_pool:
177         ret = c->usage_scratch;
178         memset(ret, 0, bytes);
179         return ret;
180 }
181
182 struct bch_dev_usage bch2_dev_usage_read(struct bch_fs *c, struct bch_dev *ca)
183 {
184         struct bch_dev_usage ret;
185
186         memset(&ret, 0, sizeof(ret));
187         acc_u64s_percpu((u64 *) &ret,
188                         (u64 __percpu *) ca->usage[0],
189                         sizeof(ret) / sizeof(u64));
190
191         return ret;
192 }
193
194 static inline struct bch_fs_usage *fs_usage_ptr(struct bch_fs *c,
195                                                 unsigned journal_seq,
196                                                 bool gc)
197 {
198         return this_cpu_ptr(gc
199                             ? c->usage_gc
200                             : c->usage[journal_seq & 1]);
201 }
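/*
 * Rough picture of the accounting layout used by fs_usage_ptr() and the
 * readers below: non-GC usage deltas go into one of two sets of percpu
 * counters, selected by journal sequence parity (c->usage[journal_seq & 1]),
 * so the set belonging to an already-written journal sequence can be folded
 * into c->usage_base by bch2_fs_usage_acc_to_base() while the other set keeps
 * accumulating; readers sum usage_base plus both percpu sets under
 * c->usage_lock.
 */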
202
203 u64 bch2_fs_usage_read_one(struct bch_fs *c, u64 *v)
204 {
205         ssize_t offset = v - (u64 *) c->usage_base;
206         unsigned seq;
207         u64 ret;
208
209         BUG_ON(offset < 0 || offset >= fs_usage_u64s(c));
210         percpu_rwsem_assert_held(&c->mark_lock);
211
212         do {
213                 seq = read_seqcount_begin(&c->usage_lock);
214                 ret = *v +
215                         percpu_u64_get((u64 __percpu *) c->usage[0] + offset) +
216                         percpu_u64_get((u64 __percpu *) c->usage[1] + offset);
217         } while (read_seqcount_retry(&c->usage_lock, seq));
218
219         return ret;
220 }
221
222 struct bch_fs_usage_online *bch2_fs_usage_read(struct bch_fs *c)
223 {
224         struct bch_fs_usage_online *ret;
225         unsigned seq, i, u64s;
226
227         percpu_down_read(&c->mark_lock);
228
229         ret = kmalloc(sizeof(struct bch_fs_usage_online) +
230                       sizeof(u64) * c->replicas.nr, GFP_NOFS);
231         if (unlikely(!ret)) {
232                 percpu_up_read(&c->mark_lock);
233                 return NULL;
234         }
235
236         ret->online_reserved = percpu_u64_get(c->online_reserved);
237
238         u64s = fs_usage_u64s(c);
239         do {
240                 seq = read_seqcount_begin(&c->usage_lock);
241                 memcpy(&ret->u, c->usage_base, u64s * sizeof(u64));
242                 for (i = 0; i < ARRAY_SIZE(c->usage); i++)
243                         acc_u64s_percpu((u64 *) &ret->u, (u64 __percpu *) c->usage[i], u64s);
244         } while (read_seqcount_retry(&c->usage_lock, seq));
245
246         return ret;
247 }
248
249 void bch2_fs_usage_acc_to_base(struct bch_fs *c, unsigned idx)
250 {
251         unsigned u64s = fs_usage_u64s(c);
252
253         BUG_ON(idx >= ARRAY_SIZE(c->usage));
254
255         preempt_disable();
256         write_seqcount_begin(&c->usage_lock);
257
258         acc_u64s_percpu((u64 *) c->usage_base,
259                         (u64 __percpu *) c->usage[idx], u64s);
260         percpu_memset(c->usage[idx], 0, u64s * sizeof(u64));
261
262         write_seqcount_end(&c->usage_lock);
263         preempt_enable();
264 }
265
266 void bch2_fs_usage_to_text(struct printbuf *out,
267                            struct bch_fs *c,
268                            struct bch_fs_usage_online *fs_usage)
269 {
270         unsigned i;
271
272         pr_buf(out, "capacity:\t\t\t%llu\n", c->capacity);
273
274         pr_buf(out, "hidden:\t\t\t\t%llu\n",
275                fs_usage->u.hidden);
276         pr_buf(out, "data:\t\t\t\t%llu\n",
277                fs_usage->u.data);
278         pr_buf(out, "cached:\t\t\t\t%llu\n",
279                fs_usage->u.cached);
280         pr_buf(out, "reserved:\t\t\t%llu\n",
281                fs_usage->u.reserved);
282         pr_buf(out, "nr_inodes:\t\t\t%llu\n",
283                fs_usage->u.nr_inodes);
284         pr_buf(out, "online reserved:\t\t%llu\n",
285                fs_usage->online_reserved);
286
287         for (i = 0;
288              i < ARRAY_SIZE(fs_usage->u.persistent_reserved);
289              i++) {
290                 pr_buf(out, "%u replicas:\n", i + 1);
291                 pr_buf(out, "\treserved:\t\t%llu\n",
292                        fs_usage->u.persistent_reserved[i]);
293         }
294
295         for (i = 0; i < c->replicas.nr; i++) {
296                 struct bch_replicas_entry *e =
297                         cpu_replicas_entry(&c->replicas, i);
298
299                 pr_buf(out, "\t");
300                 bch2_replicas_entry_to_text(out, e);
301                 pr_buf(out, ":\t%llu\n", fs_usage->u.replicas[i]);
302         }
303 }
304
305 #define RESERVE_FACTOR  6
306
307 static u64 reserve_factor(u64 r)
308 {
309         return r + (round_up(r, (1 << RESERVE_FACTOR)) >> RESERVE_FACTOR);
310 }
311
312 static u64 avail_factor(u64 r)
313 {
314         return (r << RESERVE_FACTOR) / ((1 << RESERVE_FACTOR) + 1);
315 }
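/*
 * Worked example for the two helpers above, with RESERVE_FACTOR == 6 (i.e. a
 * ~1/64 safety margin): reserve_factor(1000) == 1000 + (1024 >> 6) == 1016,
 * and avail_factor() goes the other way, scaling by 64/65, so
 * avail_factor(1016) == 65024 / 65 == 1000; the two are inverses up to
 * rounding.
 */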
316
317 u64 bch2_fs_sectors_used(struct bch_fs *c, struct bch_fs_usage_online *fs_usage)
318 {
319         return min(fs_usage->u.hidden +
320                    fs_usage->u.btree +
321                    fs_usage->u.data +
322                    reserve_factor(fs_usage->u.reserved +
323                                   fs_usage->online_reserved),
324                    c->capacity);
325 }
326
327 static struct bch_fs_usage_short
328 __bch2_fs_usage_read_short(struct bch_fs *c)
329 {
330         struct bch_fs_usage_short ret;
331         u64 data, reserved;
332
333         ret.capacity = c->capacity -
334                 bch2_fs_usage_read_one(c, &c->usage_base->hidden);
335
336         data            = bch2_fs_usage_read_one(c, &c->usage_base->data) +
337                 bch2_fs_usage_read_one(c, &c->usage_base->btree);
338         reserved        = bch2_fs_usage_read_one(c, &c->usage_base->reserved) +
339                 percpu_u64_get(c->online_reserved);
340
341         ret.used        = min(ret.capacity, data + reserve_factor(reserved));
342         ret.free        = ret.capacity - ret.used;
343
344         ret.nr_inodes   = bch2_fs_usage_read_one(c, &c->usage_base->nr_inodes);
345
346         return ret;
347 }
348
349 struct bch_fs_usage_short
350 bch2_fs_usage_read_short(struct bch_fs *c)
351 {
352         struct bch_fs_usage_short ret;
353
354         percpu_down_read(&c->mark_lock);
355         ret = __bch2_fs_usage_read_short(c);
356         percpu_up_read(&c->mark_lock);
357
358         return ret;
359 }
360
361 static inline int is_unavailable_bucket(struct bucket_mark m)
362 {
363         return !is_available_bucket(m);
364 }
365
366 static inline int is_fragmented_bucket(struct bucket_mark m,
367                                        struct bch_dev *ca)
368 {
369         if (!m.owned_by_allocator &&
370             m.data_type == BCH_DATA_USER &&
371             bucket_sectors_used(m))
372                 return max_t(int, 0, (int) ca->mi.bucket_size -
373                              bucket_sectors_used(m));
374         return 0;
375 }
376
377 static inline enum bch_data_type bucket_type(struct bucket_mark m)
378 {
379         return m.cached_sectors && !m.dirty_sectors
380                 ? BCH_DATA_CACHED
381                 : m.data_type;
382 }
383
384 static bool bucket_became_unavailable(struct bucket_mark old,
385                                       struct bucket_mark new)
386 {
387         return is_available_bucket(old) &&
388                !is_available_bucket(new);
389 }
390
391 int bch2_fs_usage_apply(struct bch_fs *c,
392                         struct bch_fs_usage_online *src,
393                         struct disk_reservation *disk_res,
394                         unsigned journal_seq)
395 {
396         struct bch_fs_usage *dst = fs_usage_ptr(c, journal_seq, false);
397         s64 added = src->u.data + src->u.reserved;
398         s64 should_not_have_added;
399         int ret = 0;
400
401         percpu_rwsem_assert_held(&c->mark_lock);
402
403         /*
404          * Not allowed to reduce sectors_available except by getting a
405          * reservation:
406          */
407         should_not_have_added = added - (s64) (disk_res ? disk_res->sectors : 0);
408         if (WARN_ONCE(should_not_have_added > 0,
409                       "disk usage increased by %lli more than reservation of %llu",
410                       added, disk_res ? disk_res->sectors : 0)) {
411                 atomic64_sub(should_not_have_added, &c->sectors_available);
412                 added -= should_not_have_added;
413                 ret = -1;
414         }
415
416         if (added > 0) {
417                 disk_res->sectors       -= added;
418                 src->online_reserved    -= added;
419         }
420
421         this_cpu_add(*c->online_reserved, src->online_reserved);
422
423         preempt_disable();
424         acc_u64s((u64 *) dst, (u64 *) &src->u, fs_usage_u64s(c));
425         preempt_enable();
426
427         return ret;
428 }
429
430 static inline void account_bucket(struct bch_fs_usage *fs_usage,
431                                   struct bch_dev_usage *dev_usage,
432                                   enum bch_data_type type,
433                                   int nr, s64 size)
434 {
435         if (type == BCH_DATA_SB || type == BCH_DATA_JOURNAL)
436                 fs_usage->hidden        += size;
437
438         dev_usage->buckets[type]        += nr;
439 }
440
441 static void bch2_dev_usage_update(struct bch_fs *c, struct bch_dev *ca,
442                                   struct bch_fs_usage *fs_usage,
443                                   struct bucket_mark old, struct bucket_mark new,
444                                   bool gc)
445 {
446         struct bch_dev_usage *dev_usage;
447
448         percpu_rwsem_assert_held(&c->mark_lock);
449
450         preempt_disable();
451         dev_usage = this_cpu_ptr(ca->usage[gc]);
452
453         if (bucket_type(old))
454                 account_bucket(fs_usage, dev_usage, bucket_type(old),
455                                -1, -ca->mi.bucket_size);
456
457         if (bucket_type(new))
458                 account_bucket(fs_usage, dev_usage, bucket_type(new),
459                                1, ca->mi.bucket_size);
460
461         dev_usage->buckets_ec += (int) new.stripe - (int) old.stripe;
462         dev_usage->buckets_unavailable +=
463                 is_unavailable_bucket(new) - is_unavailable_bucket(old);
464
465         dev_usage->sectors[old.data_type] -= old.dirty_sectors;
466         dev_usage->sectors[new.data_type] += new.dirty_sectors;
467         dev_usage->sectors[BCH_DATA_CACHED] +=
468                 (int) new.cached_sectors - (int) old.cached_sectors;
469         dev_usage->sectors_fragmented +=
470                 is_fragmented_bucket(new, ca) - is_fragmented_bucket(old, ca);
471         preempt_enable();
472
473         if (!is_available_bucket(old) && is_available_bucket(new))
474                 bch2_wake_allocator(ca);
475 }
476
477 void bch2_dev_usage_from_buckets(struct bch_fs *c)
478 {
479         struct bch_dev *ca;
480         struct bucket_mark old = { .v.counter = 0 };
481         struct bucket_array *buckets;
482         struct bucket *g;
483         unsigned i;
484         int cpu;
485
486         c->usage_base->hidden = 0;
487
488         for_each_member_device(ca, c, i) {
489                 for_each_possible_cpu(cpu)
490                         memset(per_cpu_ptr(ca->usage[0], cpu), 0,
491                                sizeof(*ca->usage[0]));
492
493                 buckets = bucket_array(ca);
494
495                 for_each_bucket(g, buckets)
496                         bch2_dev_usage_update(c, ca, c->usage_base,
497                                               old, g->mark, false);
498         }
499 }
500
501 static inline void update_replicas(struct bch_fs *c,
502                                    struct bch_fs_usage *fs_usage,
503                                    struct bch_replicas_entry *r,
504                                    s64 sectors)
505 {
506         int idx = bch2_replicas_entry_idx(c, r);
507
508         BUG_ON(idx < 0);
509
510         switch (r->data_type) {
511         case BCH_DATA_BTREE:
512                 fs_usage->btree         += sectors;
513                 break;
514         case BCH_DATA_USER:
515                 fs_usage->data          += sectors;
516                 break;
517         case BCH_DATA_CACHED:
518                 fs_usage->cached        += sectors;
519                 break;
520         }
521         fs_usage->replicas[idx]         += sectors;
522 }
523
524 static inline void update_cached_sectors(struct bch_fs *c,
525                                          struct bch_fs_usage *fs_usage,
526                                          unsigned dev, s64 sectors)
527 {
528         struct bch_replicas_padded r;
529
530         bch2_replicas_entry_cached(&r.e, dev);
531
532         update_replicas(c, fs_usage, &r.e, sectors);
533 }
534
535 static struct replicas_delta_list *
536 replicas_deltas_realloc(struct btree_trans *trans, unsigned more)
537 {
538         struct replicas_delta_list *d = trans->fs_usage_deltas;
539         unsigned new_size = d ? (d->size + more) * 2 : 128;
540
541         if (!d || d->used + more > d->size) {
542                 d = krealloc(d, sizeof(*d) + new_size, GFP_NOIO|__GFP_ZERO);
543                 BUG_ON(!d);
544
545                 d->size = new_size;
546                 trans->fs_usage_deltas = d;
547         }
548         return d;
549 }
550
551 static inline void update_replicas_list(struct btree_trans *trans,
552                                         struct bch_replicas_entry *r,
553                                         s64 sectors)
554 {
555         struct replicas_delta_list *d;
556         struct replicas_delta *n;
557         unsigned b;
558
559         if (!sectors)
560                 return;
561
562         b = replicas_entry_bytes(r) + 8;
563         d = replicas_deltas_realloc(trans, b);
564
565         n = (void *) d->d + d->used;
566         n->delta = sectors;
567         memcpy((void *) n + offsetof(struct replicas_delta, r),
568                r, replicas_entry_bytes(r));
569         d->used += b;
570 }
571
572 static inline void update_cached_sectors_list(struct btree_trans *trans,
573                                               unsigned dev, s64 sectors)
574 {
575         struct bch_replicas_padded r;
576
577         bch2_replicas_entry_cached(&r.e, dev);
578
579         update_replicas_list(trans, &r.e, sectors);
580 }
581
582 void bch2_replicas_delta_list_apply(struct bch_fs *c,
583                                     struct bch_fs_usage *fs_usage,
584                                     struct replicas_delta_list *r)
585 {
586         struct replicas_delta *d = r->d;
587         struct replicas_delta *top = (void *) r->d + r->used;
588         unsigned i;
589
590         fs_usage->nr_inodes += r->nr_inodes;
591
592         for (i = 0; i < BCH_REPLICAS_MAX; i++) {
593                 fs_usage->reserved += r->persistent_reserved[i];
594                 fs_usage->persistent_reserved[i] += r->persistent_reserved[i];
595         }
596
597         while (d != top) {
598                 BUG_ON((void *) d > (void *) top);
599
600                 update_replicas(c, fs_usage, &d->r, d->delta);
601
602                 d = (void *) d + replicas_entry_bytes(&d->r) + 8;
603         }
604 }
605
606 #define do_mark_fn(fn, c, pos, flags, ...)                              \
607 ({                                                                      \
608         int gc, ret = 0;                                                \
609                                                                         \
610         percpu_rwsem_assert_held(&c->mark_lock);                        \
611                                                                         \
612         for (gc = 0; gc < 2 && !ret; gc++)                              \
613                 if (!gc == !(flags & BCH_BUCKET_MARK_GC) ||             \
614                     (gc && gc_visited(c, pos)))                         \
615                         ret = fn(c, __VA_ARGS__, gc);                   \
616         ret;                                                            \
617 })
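/*
 * In other words: do_mark_fn() runs fn against the normal (non-GC) bucket
 * info when the caller isn't marking on behalf of GC, and against the GC copy
 * either when it is, or when GC is running and has already walked past 'pos',
 * so that a concurrent GC also sees updates to keys it has already visited.
 */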
618
619 static int __bch2_invalidate_bucket(struct bch_fs *c, struct bch_dev *ca,
620                                     size_t b, struct bucket_mark *ret,
621                                     bool gc)
622 {
623         struct bch_fs_usage *fs_usage = fs_usage_ptr(c, 0, gc);
624         struct bucket *g = __bucket(ca, b, gc);
625         struct bucket_mark old, new;
626
627         old = bucket_cmpxchg(g, new, ({
628                 BUG_ON(!is_available_bucket(new));
629
630                 new.owned_by_allocator  = true;
631                 new.data_type           = 0;
632                 new.cached_sectors      = 0;
633                 new.dirty_sectors       = 0;
634                 new.gen++;
635         }));
636
637         bch2_dev_usage_update(c, ca, fs_usage, old, new, gc);
638
639         if (old.cached_sectors)
640                 update_cached_sectors(c, fs_usage, ca->dev_idx,
641                                       -((s64) old.cached_sectors));
642
643         if (!gc)
644                 *ret = old;
645         return 0;
646 }
647
648 void bch2_invalidate_bucket(struct bch_fs *c, struct bch_dev *ca,
649                             size_t b, struct bucket_mark *old)
650 {
651         do_mark_fn(__bch2_invalidate_bucket, c, gc_phase(GC_PHASE_START), 0,
652                    ca, b, old);
653
654         if (!old->owned_by_allocator && old->cached_sectors)
655                 trace_invalidate(ca, bucket_to_sector(ca, b),
656                                  old->cached_sectors);
657 }
658
659 static int __bch2_mark_alloc_bucket(struct bch_fs *c, struct bch_dev *ca,
660                                     size_t b, bool owned_by_allocator,
661                                     bool gc)
662 {
663         struct bch_fs_usage *fs_usage = fs_usage_ptr(c, 0, gc);
664         struct bucket *g = __bucket(ca, b, gc);
665         struct bucket_mark old, new;
666
667         old = bucket_cmpxchg(g, new, ({
668                 new.owned_by_allocator  = owned_by_allocator;
669         }));
670
671         bch2_dev_usage_update(c, ca, fs_usage, old, new, gc);
672
673         BUG_ON(!gc &&
674                !owned_by_allocator && !old.owned_by_allocator);
675
676         return 0;
677 }
678
679 void bch2_mark_alloc_bucket(struct bch_fs *c, struct bch_dev *ca,
680                             size_t b, bool owned_by_allocator,
681                             struct gc_pos pos, unsigned flags)
682 {
683         preempt_disable();
684
685         do_mark_fn(__bch2_mark_alloc_bucket, c, pos, flags,
686                    ca, b, owned_by_allocator);
687
688         preempt_enable();
689 }
690
691 static int bch2_mark_alloc(struct bch_fs *c, struct bkey_s_c k,
692                            struct bch_fs_usage *fs_usage,
693                            u64 journal_seq, unsigned flags)
694 {
695         bool gc = flags & BCH_BUCKET_MARK_GC;
696         struct bkey_alloc_unpacked u;
697         struct bch_dev *ca;
698         struct bucket *g;
699         struct bucket_mark old, m;
700
701         /*
702          * alloc btree is read in by bch2_alloc_read, not gc:
703          */
704         if ((flags & BCH_BUCKET_MARK_GC) &&
705             !(flags & BCH_BUCKET_MARK_BUCKET_INVALIDATE))
706                 return 0;
707
708         ca = bch_dev_bkey_exists(c, k.k->p.inode);
709
710         if (k.k->p.offset >= ca->mi.nbuckets)
711                 return 0;
712
713         g = __bucket(ca, k.k->p.offset, gc);
714         u = bch2_alloc_unpack(k);
715
716         old = bucket_cmpxchg(g, m, ({
717                 m.gen                   = u.gen;
718                 m.data_type             = u.data_type;
719                 m.dirty_sectors         = u.dirty_sectors;
720                 m.cached_sectors        = u.cached_sectors;
721
722                 if (journal_seq) {
723                         m.journal_seq_valid     = 1;
724                         m.journal_seq           = journal_seq;
725                 }
726         }));
727
728         if (!(flags & BCH_BUCKET_MARK_ALLOC_READ))
729                 bch2_dev_usage_update(c, ca, fs_usage, old, m, gc);
730
731         g->io_time[READ]        = u.read_time;
732         g->io_time[WRITE]       = u.write_time;
733         g->oldest_gen           = u.oldest_gen;
734         g->gen_valid            = 1;
735
736         /*
737          * need to know if we're getting called from the invalidate path or
738          * not:
739          */
740
741         if ((flags & BCH_BUCKET_MARK_BUCKET_INVALIDATE) &&
742             old.cached_sectors) {
743                 update_cached_sectors(c, fs_usage, ca->dev_idx,
744                                       -old.cached_sectors);
745                 trace_invalidate(ca, bucket_to_sector(ca, k.k->p.offset),
746                                  old.cached_sectors);
747         }
748
749         return 0;
750 }
751
752 #define checked_add(a, b)                                       \
753 ({                                                              \
754         unsigned _res = (unsigned) (a) + (b);                   \
755         bool overflow = _res > U16_MAX;                         \
756         if (overflow)                                           \
757                 _res = U16_MAX;                                 \
758         (a) = _res;                                             \
759         overflow;                                               \
760 })
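/*
 * i.e. a saturating 16 bit add: checked_add(a, b) clamps the result to U16_MAX
 * and evaluates to true if it had to clamp; e.g. with a == 65000 and b == 1000
 * it leaves a == U16_MAX and reports overflow.
 */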
761
762 static int __bch2_mark_metadata_bucket(struct bch_fs *c, struct bch_dev *ca,
763                                        size_t b, enum bch_data_type type,
764                                        unsigned sectors, bool gc)
765 {
766         struct bucket *g = __bucket(ca, b, gc);
767         struct bucket_mark old, new;
768         bool overflow;
769
770         BUG_ON(type != BCH_DATA_SB &&
771                type != BCH_DATA_JOURNAL);
772
773         old = bucket_cmpxchg(g, new, ({
774                 new.data_type   = type;
775                 overflow = checked_add(new.dirty_sectors, sectors);
776         }));
777
778         bch2_fs_inconsistent_on(old.data_type &&
779                                 old.data_type != type, c,
780                 "different types of data in same bucket: %s, %s",
781                 bch2_data_types[old.data_type],
782                 bch2_data_types[type]);
783
784         bch2_fs_inconsistent_on(overflow, c,
785                 "bucket sector count overflow: %u + %u > U16_MAX",
786                 old.dirty_sectors, sectors);
787
788         if (c)
789                 bch2_dev_usage_update(c, ca, fs_usage_ptr(c, 0, gc),
790                                       old, new, gc);
791
792         return 0;
793 }
794
795 void bch2_mark_metadata_bucket(struct bch_fs *c, struct bch_dev *ca,
796                                size_t b, enum bch_data_type type,
797                                unsigned sectors, struct gc_pos pos,
798                                unsigned flags)
799 {
800         BUG_ON(type != BCH_DATA_SB &&
801                type != BCH_DATA_JOURNAL);
802
803         preempt_disable();
804
805         if (likely(c)) {
806                 do_mark_fn(__bch2_mark_metadata_bucket, c, pos, flags,
807                            ca, b, type, sectors);
808         } else {
809                 __bch2_mark_metadata_bucket(c, ca, b, type, sectors, 0);
810         }
811
812         preempt_enable();
813 }
814
815 static s64 disk_sectors_scaled(unsigned n, unsigned d, unsigned sectors)
816 {
817         return DIV_ROUND_UP(sectors * n, d);
818 }
819
820 static s64 __ptr_disk_sectors_delta(unsigned old_size,
821                                     unsigned offset, s64 delta,
822                                     unsigned flags,
823                                     unsigned n, unsigned d)
824 {
825         BUG_ON(!n || !d);
826
827         if (flags & BCH_BUCKET_MARK_OVERWRITE_SPLIT) {
828                 BUG_ON(offset + -delta > old_size);
829
830                 return -disk_sectors_scaled(n, d, old_size) +
831                         disk_sectors_scaled(n, d, offset) +
832                         disk_sectors_scaled(n, d, old_size - offset + delta);
833         } else if (flags & BCH_BUCKET_MARK_OVERWRITE) {
834                 BUG_ON(offset + -delta > old_size);
835
836                 return -disk_sectors_scaled(n, d, old_size) +
837                         disk_sectors_scaled(n, d, old_size + delta);
838         } else {
839                 return  disk_sectors_scaled(n, d, delta);
840         }
841 }
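/*
 * Worked example for the above (a sketch): take an extent with
 * crc.live_size == 128 sectors compressed down to crc.compressed_size == 32,
 * so n/d == 32/128 and it is charged 32 on-disk sectors. If an overwrite
 * splits it by removing 64 live sectors starting at offset 32
 * (BCH_BUCKET_MARK_OVERWRITE_SPLIT, delta == -64), the surviving front and
 * back pieces are 32 live sectors each, each rounded up to 8 on-disk sectors,
 * so the delta returned is -32 + 8 + 8 == -16.
 */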
842
843 static s64 ptr_disk_sectors_delta(struct extent_ptr_decoded p,
844                                   unsigned offset, s64 delta,
845                                   unsigned flags)
846 {
847         return __ptr_disk_sectors_delta(p.crc.live_size,
848                                         offset, delta, flags,
849                                         p.crc.compressed_size,
850                                         p.crc.uncompressed_size);
851 }
852
853 static void bucket_set_stripe(struct bch_fs *c,
854                               const struct bch_stripe *v,
855                               struct bch_fs_usage *fs_usage,
856                               u64 journal_seq,
857                               unsigned flags)
858 {
859         bool enabled = !(flags & BCH_BUCKET_MARK_OVERWRITE);
860         bool gc = flags & BCH_BUCKET_MARK_GC;
861         unsigned i;
862
863         for (i = 0; i < v->nr_blocks; i++) {
864                 const struct bch_extent_ptr *ptr = v->ptrs + i;
865                 struct bch_dev *ca = bch_dev_bkey_exists(c, ptr->dev);
866                 struct bucket *g = PTR_BUCKET(ca, ptr, gc);
867                 struct bucket_mark new, old;
868
869                 old = bucket_cmpxchg(g, new, ({
870                         new.stripe                      = enabled;
871                         if (journal_seq) {
872                                 new.journal_seq_valid   = 1;
873                                 new.journal_seq         = journal_seq;
874                         }
875                 }));
876
877                 bch2_dev_usage_update(c, ca, fs_usage, old, new, gc);
878
879                 /*
880                  * XXX write repair code for these, flag stripe as possibly bad
881                  */
882                 if (old.gen != ptr->gen)
883                         bch2_fsck_err(c, FSCK_CAN_IGNORE|FSCK_NEED_FSCK,
884                                       "stripe with stale pointer");
885 #if 0
886                 /*
887                  * We'd like to check for these, but these checks don't work
888                  * yet:
889                  */
890                 if (old.stripe && enabled)
891                         bch2_fsck_err(c, FSCK_CAN_IGNORE|FSCK_NEED_FSCK,
892                                       "multiple stripes using same bucket");
893
894                 if (!old.stripe && !enabled)
895                         bch2_fsck_err(c, FSCK_CAN_IGNORE|FSCK_NEED_FSCK,
896                                       "deleting stripe but bucket not marked as stripe bucket");
897 #endif
898         }
899 }
900
901 static bool bch2_mark_pointer(struct bch_fs *c,
902                               struct extent_ptr_decoded p,
903                               s64 sectors, enum bch_data_type data_type,
904                               struct bch_fs_usage *fs_usage,
905                               u64 journal_seq, unsigned flags)
906 {
907         bool gc = flags & BCH_BUCKET_MARK_GC;
908         struct bucket_mark old, new;
909         struct bch_dev *ca = bch_dev_bkey_exists(c, p.ptr.dev);
910         struct bucket *g = PTR_BUCKET(ca, &p.ptr, gc);
911         bool overflow;
912         u64 v;
913
914         v = atomic64_read(&g->_mark.v);
915         do {
916                 new.v.counter = old.v.counter = v;
917
918                 /*
919                  * Check this after reading bucket mark to guard against
920                  * the allocator invalidating a bucket after we've already
921                  * checked the gen
922                  */
923                 if (gen_after(p.ptr.gen, new.gen)) {
924                         bch2_fsck_err(c, FSCK_CAN_IGNORE|FSCK_NEED_FSCK,
925                                       "pointer gen in the future");
926                         return true;
927                 }
928
929                 if (new.gen != p.ptr.gen) {
930                         /* XXX write repair code for this */
931                         if (!p.ptr.cached &&
932                             test_bit(JOURNAL_REPLAY_DONE, &c->journal.flags))
933                                 bch2_fsck_err(c, FSCK_CAN_IGNORE|FSCK_NEED_FSCK,
934                                               "stale dirty pointer");
935                         return true;
936                 }
937
938                 if (!p.ptr.cached)
939                         overflow = checked_add(new.dirty_sectors, sectors);
940                 else
941                         overflow = checked_add(new.cached_sectors, sectors);
942
943                 if (!new.dirty_sectors &&
944                     !new.cached_sectors) {
945                         new.data_type   = 0;
946
947                         if (journal_seq) {
948                                 new.journal_seq_valid = 1;
949                                 new.journal_seq = journal_seq;
950                         }
951                 } else {
952                         new.data_type = data_type;
953                 }
954
955                 if (flags & BCH_BUCKET_MARK_NOATOMIC) {
956                         g->_mark = new;
957                         break;
958                 }
959         } while ((v = atomic64_cmpxchg(&g->_mark.v,
960                               old.v.counter,
961                               new.v.counter)) != old.v.counter);
962
963         if (old.data_type && old.data_type != data_type)
964                 bch2_fsck_err(c, FSCK_CAN_IGNORE|FSCK_NEED_FSCK,
965                         "bucket %u:%zu gen %u different types of data in same bucket: %s, %s",
966                         p.ptr.dev, PTR_BUCKET_NR(ca, &p.ptr),
967                         new.gen,
968                         bch2_data_types[old.data_type],
969                         bch2_data_types[data_type]);
970
971         bch2_fs_inconsistent_on(overflow, c,
972                 "bucket sector count overflow: %u + %lli > U16_MAX",
973                 !p.ptr.cached
974                 ? old.dirty_sectors
975                 : old.cached_sectors, sectors);
976
977         bch2_dev_usage_update(c, ca, fs_usage, old, new, gc);
978
979         BUG_ON(!gc && bucket_became_unavailable(old, new));
980
981         return false;
982 }
983
984 static int bch2_mark_stripe_ptr(struct bch_fs *c,
985                                 struct bch_extent_stripe_ptr p,
986                                 enum bch_data_type data_type,
987                                 struct bch_fs_usage *fs_usage,
988                                 s64 sectors, unsigned flags,
989                                 struct bch_replicas_padded *r,
990                                 unsigned *nr_data,
991                                 unsigned *nr_parity)
992 {
993         bool gc = flags & BCH_BUCKET_MARK_GC;
994         struct stripe *m;
995         unsigned old, new;
996         int blocks_nonempty_delta;
997
998         m = genradix_ptr(&c->stripes[gc], p.idx);
999
1000         spin_lock(&c->ec_stripes_heap_lock);
1001
1002         if (!m || !m->alive) {
1003                 spin_unlock(&c->ec_stripes_heap_lock);
1004                 bch_err_ratelimited(c, "pointer to nonexistent stripe %llu",
1005                                     (u64) p.idx);
1006                 return -EIO;
1007         }
1008
1009         BUG_ON(m->r.e.data_type != data_type);
1010
1011         *nr_data        = m->nr_blocks - m->nr_redundant;
1012         *nr_parity      = m->nr_redundant;
1013         *r = m->r;
1014
1015         old = m->block_sectors[p.block];
1016         m->block_sectors[p.block] += sectors;
1017         new = m->block_sectors[p.block];
1018
1019         blocks_nonempty_delta = (int) !!new - (int) !!old;
1020         if (blocks_nonempty_delta) {
1021                 m->blocks_nonempty += blocks_nonempty_delta;
1022
1023                 if (!gc)
1024                         bch2_stripes_heap_update(c, m, p.idx);
1025         }
1026
1027         m->dirty = true;
1028
1029         spin_unlock(&c->ec_stripes_heap_lock);
1030
1031         return 0;
1032 }
1033
1034 static int bch2_mark_extent(struct bch_fs *c, struct bkey_s_c k,
1035                             unsigned offset, s64 sectors,
1036                             enum bch_data_type data_type,
1037                             struct bch_fs_usage *fs_usage,
1038                             unsigned journal_seq, unsigned flags)
1039 {
1040         struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
1041         const union bch_extent_entry *entry;
1042         struct extent_ptr_decoded p;
1043         struct bch_replicas_padded r;
1044         s64 dirty_sectors = 0;
1045         int ret;
1046
1047         r.e.data_type   = data_type;
1048         r.e.nr_devs     = 0;
1049         r.e.nr_required = 1;
1050
1051         BUG_ON(!sectors);
1052
1053         bkey_for_each_ptr_decode(k.k, ptrs, p, entry) {
1054                 s64 disk_sectors = data_type == BCH_DATA_BTREE
1055                         ? sectors
1056                         : ptr_disk_sectors_delta(p, offset, sectors, flags);
1057                 bool stale = bch2_mark_pointer(c, p, disk_sectors, data_type,
1058                                                fs_usage, journal_seq, flags);
1059
1060                 if (p.ptr.cached) {
1061                         if (!stale)
1062                                 update_cached_sectors(c, fs_usage, p.ptr.dev,
1063                                                       disk_sectors);
1064                 } else if (!p.has_ec) {
1065                         dirty_sectors          += disk_sectors;
1066                         r.e.devs[r.e.nr_devs++] = p.ptr.dev;
1067                 } else {
1068                         struct bch_replicas_padded ec_r;
1069                         unsigned nr_data, nr_parity;
1070                         s64 parity_sectors;
1071
1072                         ret = bch2_mark_stripe_ptr(c, p.ec, data_type,
1073                                         fs_usage, disk_sectors, flags,
1074                                         &ec_r, &nr_data, &nr_parity);
1075                         if (ret)
1076                                 return ret;
1077
1078                         parity_sectors =
1079                                 __ptr_disk_sectors_delta(p.crc.live_size,
1080                                         offset, sectors, flags,
1081                                         p.crc.compressed_size * nr_parity,
1082                                         p.crc.uncompressed_size * nr_data);
1083
1084                         update_replicas(c, fs_usage, &ec_r.e,
1085                                         disk_sectors + parity_sectors);
1086
1087                         /*
1088                          * There may be other dirty pointers in this extent, but
1089                          * if so they're not required for mounting if we have an
1090                          * erasure coded pointer in this extent:
1091                          */
1092                         r.e.nr_required = 0;
1093                 }
1094         }
1095
1096         if (r.e.nr_devs)
1097                 update_replicas(c, fs_usage, &r.e, dirty_sectors);
1098
1099         return 0;
1100 }
1101
1102 static int bch2_mark_stripe(struct bch_fs *c, struct bkey_s_c k,
1103                             struct bch_fs_usage *fs_usage,
1104                             u64 journal_seq, unsigned flags)
1105 {
1106         bool gc = flags & BCH_BUCKET_MARK_GC;
1107         struct bkey_s_c_stripe s = bkey_s_c_to_stripe(k);
1108         size_t idx = s.k->p.offset;
1109         struct stripe *m = genradix_ptr(&c->stripes[gc], idx);
1110         unsigned i;
1111
1112         spin_lock(&c->ec_stripes_heap_lock);
1113
1114         if (!m || ((flags & BCH_BUCKET_MARK_OVERWRITE) && !m->alive)) {
1115                 spin_unlock(&c->ec_stripes_heap_lock);
1116                 bch_err_ratelimited(c, "error marking nonexistent stripe %zu",
1117                                     idx);
1118                 return -1;
1119         }
1120
1121         if (!(flags & BCH_BUCKET_MARK_OVERWRITE)) {
1122                 m->sectors      = le16_to_cpu(s.v->sectors);
1123                 m->algorithm    = s.v->algorithm;
1124                 m->nr_blocks    = s.v->nr_blocks;
1125                 m->nr_redundant = s.v->nr_redundant;
1126
1127                 bch2_bkey_to_replicas(&m->r.e, k);
1128
1129                 /*
1130                  * XXX: account for stripes somehow here
1131                  */
1132 #if 0
1133                 update_replicas(c, fs_usage, &m->r.e, stripe_sectors);
1134 #endif
1135
1136                 /* gc recalculates these fields: */
1137                 if (!(flags & BCH_BUCKET_MARK_GC)) {
1138                         for (i = 0; i < s.v->nr_blocks; i++) {
1139                                 m->block_sectors[i] =
1140                                         stripe_blockcount_get(s.v, i);
1141                                 m->blocks_nonempty += !!m->block_sectors[i];
1142                         }
1143                 }
1144
1145                 if (!gc)
1146                         bch2_stripes_heap_update(c, m, idx);
1147                 m->alive        = true;
1148         } else {
1149                 if (!gc)
1150                         bch2_stripes_heap_del(c, m, idx);
1151                 memset(m, 0, sizeof(*m));
1152         }
1153
1154         spin_unlock(&c->ec_stripes_heap_lock);
1155
1156         bucket_set_stripe(c, s.v, fs_usage, 0, flags);
1157         return 0;
1158 }
1159
1160 int bch2_mark_key_locked(struct bch_fs *c,
1161                    struct bkey_s_c k,
1162                    unsigned offset, s64 sectors,
1163                    struct bch_fs_usage *fs_usage,
1164                    u64 journal_seq, unsigned flags)
1165 {
1166         int ret = 0;
1167
1168         preempt_disable();
1169
1170         if (!fs_usage || (flags & BCH_BUCKET_MARK_GC))
1171                 fs_usage = fs_usage_ptr(c, journal_seq,
1172                                         flags & BCH_BUCKET_MARK_GC);
1173
1174         switch (k.k->type) {
1175         case KEY_TYPE_alloc:
1176                 ret = bch2_mark_alloc(c, k, fs_usage, journal_seq, flags);
1177                 break;
1178         case KEY_TYPE_btree_ptr:
1179                 sectors = !(flags & BCH_BUCKET_MARK_OVERWRITE)
1180                         ?  c->opts.btree_node_size
1181                         : -c->opts.btree_node_size;
1182
1183                 ret = bch2_mark_extent(c, k, offset, sectors, BCH_DATA_BTREE,
1184                                 fs_usage, journal_seq, flags);
1185                 break;
1186         case KEY_TYPE_extent:
1187         case KEY_TYPE_reflink_v:
1188                 ret = bch2_mark_extent(c, k, offset, sectors, BCH_DATA_USER,
1189                                 fs_usage, journal_seq, flags);
1190                 break;
1191         case KEY_TYPE_stripe:
1192                 ret = bch2_mark_stripe(c, k, fs_usage, journal_seq, flags);
1193                 break;
1194         case KEY_TYPE_inode:
1195                 if (!(flags & BCH_BUCKET_MARK_OVERWRITE))
1196                         fs_usage->nr_inodes++;
1197                 else
1198                         fs_usage->nr_inodes--;
1199                 break;
1200         case KEY_TYPE_reservation: {
1201                 unsigned replicas = bkey_s_c_to_reservation(k).v->nr_replicas;
1202
1203                 sectors *= replicas;
1204                 replicas = clamp_t(unsigned, replicas, 1,
1205                                    ARRAY_SIZE(fs_usage->persistent_reserved));
1206
1207                 fs_usage->reserved                              += sectors;
1208                 fs_usage->persistent_reserved[replicas - 1]     += sectors;
1209                 break;
1210         }
1211         }
1212
1213         preempt_enable();
1214
1215         return ret;
1216 }
1217
1218 int bch2_mark_key(struct bch_fs *c, struct bkey_s_c k,
1219                   unsigned offset, s64 sectors,
1220                   struct bch_fs_usage *fs_usage,
1221                   u64 journal_seq, unsigned flags)
1222 {
1223         int ret;
1224
1225         percpu_down_read(&c->mark_lock);
1226         ret = bch2_mark_key_locked(c, k, offset, sectors,
1227                                    fs_usage, journal_seq, flags);
1228         percpu_up_read(&c->mark_lock);
1229
1230         return ret;
1231 }
1232
1233 inline int bch2_mark_overwrite(struct btree_trans *trans,
1234                                struct btree_iter *iter,
1235                                struct bkey_s_c old,
1236                                struct bkey_i *new,
1237                                struct bch_fs_usage *fs_usage,
1238                                unsigned flags)
1239 {
1240         struct bch_fs           *c = trans->c;
1241         struct btree            *b = iter->l[0].b;
1242         unsigned                offset = 0;
1243         s64                     sectors = 0;
1244
1245         flags |= BCH_BUCKET_MARK_OVERWRITE;
1246
1247         if (btree_node_is_extents(b)
1248             ? bkey_cmp(new->k.p, bkey_start_pos(old.k)) <= 0
1249             : bkey_cmp(new->k.p, old.k->p))
1250                 return 0;
1251
1252         if (btree_node_is_extents(b)) {
1253                 switch (bch2_extent_overlap(&new->k, old.k)) {
1254                 case BCH_EXTENT_OVERLAP_ALL:
1255                         offset = 0;
1256                         sectors = -((s64) old.k->size);
1257                         break;
1258                 case BCH_EXTENT_OVERLAP_BACK:
1259                         offset = bkey_start_offset(&new->k) -
1260                                 bkey_start_offset(old.k);
1261                         sectors = bkey_start_offset(&new->k) -
1262                                 old.k->p.offset;
1263                         break;
1264                 case BCH_EXTENT_OVERLAP_FRONT:
1265                         offset = 0;
1266                         sectors = bkey_start_offset(old.k) -
1267                                 new->k.p.offset;
1268                         break;
1269                 case BCH_EXTENT_OVERLAP_MIDDLE:
1270                         offset = bkey_start_offset(&new->k) -
1271                                 bkey_start_offset(old.k);
1272                         sectors = -((s64) new->k.size);
1273                         flags |= BCH_BUCKET_MARK_OVERWRITE_SPLIT;
1274                         break;
1275                 }
1276
1277                 BUG_ON(sectors >= 0);
1278         }
1279
1280         return bch2_mark_key_locked(c, old, offset, sectors, fs_usage,
1281                                     trans->journal_res.seq, flags) ?: 1;
1282 }
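/*
 * Example of the extent arithmetic above: if the old extent covers sectors
 * [0, 100) and the new key covers [50, 150), that's BCH_EXTENT_OVERLAP_BACK,
 * so offset == 50 (where the overwrite starts within the old extent) and
 * sectors == 50 - 100 == -50, i.e. fifty sectors of the old extent are going
 * away. In the OVERLAP_MIDDLE case the old extent survives on both sides of
 * the new key, hence BCH_BUCKET_MARK_OVERWRITE_SPLIT.
 */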
1283
1284 int bch2_mark_update(struct btree_trans *trans,
1285                      struct btree_insert_entry *insert,
1286                      struct bch_fs_usage *fs_usage,
1287                      unsigned flags)
1288 {
1289         struct bch_fs           *c = trans->c;
1290         struct btree_iter       *iter = insert->iter;
1291         struct btree            *b = iter->l[0].b;
1292         struct btree_node_iter  node_iter = iter->l[0].iter;
1293         struct bkey_packed      *_k;
1294         int ret = 0;
1295
1296         if (!btree_node_type_needs_gc(iter->btree_id))
1297                 return 0;
1298
1299         bch2_mark_key_locked(c, bkey_i_to_s_c(insert->k),
1300                 0, insert->k->k.size,
1301                 fs_usage, trans->journal_res.seq,
1302                 BCH_BUCKET_MARK_INSERT|flags);
1303
1304         if (unlikely(trans->flags & BTREE_INSERT_NOMARK_OVERWRITES))
1305                 return 0;
1306
1307         /*
1308          * For non extents, we only mark the new key, not the key being
1309          * overwritten - unless we're actually deleting:
1310          */
1311         if ((iter->btree_id == BTREE_ID_ALLOC ||
1312              iter->btree_id == BTREE_ID_EC) &&
1313             !bkey_deleted(&insert->k->k))
1314                 return 0;
1315
1316         while ((_k = bch2_btree_node_iter_peek_filter(&node_iter, b,
1317                                                       KEY_TYPE_discard))) {
1318                 struct bkey             unpacked;
1319                 struct bkey_s_c         k = bkey_disassemble(b, _k, &unpacked);
1320
1321                 ret = bch2_mark_overwrite(trans, iter, k, insert->k,
1322                                           fs_usage, flags);
1323                 if (ret <= 0)
1324                         break;
1325
1326                 bch2_btree_node_iter_advance(&node_iter, b);
1327         }
1328
1329         return ret;
1330 }
1331
1332 void bch2_trans_fs_usage_apply(struct btree_trans *trans,
1333                                struct bch_fs_usage_online *fs_usage)
1334 {
1335         struct bch_fs *c = trans->c;
1336         struct btree_insert_entry *i;
1337         static int warned_disk_usage = 0;
1338         u64 disk_res_sectors = trans->disk_res ? trans->disk_res->sectors : 0;
1339         char buf[200];
1340
1341         if (!bch2_fs_usage_apply(c, fs_usage, trans->disk_res,
1342                                  trans->journal_res.seq) ||
1343             warned_disk_usage ||
1344             xchg(&warned_disk_usage, 1))
1345                 return;
1346
1347         bch_err(c, "disk usage increased more than %llu sectors reserved",
1348                 disk_res_sectors);
1349
1350         trans_for_each_update(trans, i) {
1351                 struct btree_iter       *iter = i->iter;
1352                 struct btree            *b = iter->l[0].b;
1353                 struct btree_node_iter  node_iter = iter->l[0].iter;
1354                 struct bkey_packed      *_k;
1355
1356                 pr_err("while inserting");
1357                 bch2_bkey_val_to_text(&PBUF(buf), c, bkey_i_to_s_c(i->k));
1358                 pr_err("%s", buf);
1359                 pr_err("overlapping with");
1360
1361                 node_iter = iter->l[0].iter;
1362                 while ((_k = bch2_btree_node_iter_peek_filter(&node_iter, b,
1363                                                         KEY_TYPE_discard))) {
1364                         struct bkey             unpacked;
1365                         struct bkey_s_c         k;
1366
1367                         k = bkey_disassemble(b, _k, &unpacked);
1368
1369                         if (btree_node_is_extents(b)
1370                             ? bkey_cmp(i->k->k.p, bkey_start_pos(k.k)) <= 0
1371                             : bkey_cmp(i->k->k.p, k.k->p))
1372                                 break;
1373
1374                         bch2_bkey_val_to_text(&PBUF(buf), c, k);
1375                         pr_err("%s", buf);
1376
1377                         bch2_btree_node_iter_advance(&node_iter, b);
1378                 }
1379         }
1380 }
1381
1382 /* trans_mark: */
1383
1384 static int trans_get_key(struct btree_trans *trans,
1385                          enum btree_id btree_id, struct bpos pos,
1386                          struct btree_iter **iter,
1387                          struct bkey_s_c *k)
1388 {
1389         struct btree_insert_entry *i;
1390         int ret;
1391
1392         trans_for_each_update(trans, i)
1393                 if (i->iter->btree_id == btree_id &&
1394                     (btree_node_type_is_extents(btree_id)
1395                      ? bkey_cmp(pos, bkey_start_pos(&i->k->k)) >= 0 &&
1396                        bkey_cmp(pos, i->k->k.p) < 0
1397                      : !bkey_cmp(pos, i->iter->pos))) {
1398                         *iter   = i->iter;
1399                         *k      = bkey_i_to_s_c(i->k);
1400                         return 1;
1401                 }
1402
1403         *iter = bch2_trans_get_iter(trans, btree_id, pos,
1404                                     BTREE_ITER_SLOTS|BTREE_ITER_INTENT);
1405         if (IS_ERR(*iter))
1406                 return PTR_ERR(*iter);
1407
1408         *k = bch2_btree_iter_peek_slot(*iter);
1409         ret = bkey_err(*k);
1410         if (ret)
1411                 bch2_trans_iter_put(trans, *iter);
1412         return ret;
1413 }
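/*
 * Note on the convention above: trans_get_key() returns 1 if the key was found
 * among this transaction's own pending updates (so *iter and *k refer to the
 * not yet committed value), 0 if it was read from the btree, and a negative
 * error code on failure. bch2_trans_mark_pointer() below uses the 0 case to
 * fall back to the in-memory bucket mark, since the alloc btree may not be up
 * to date during journal replay.
 */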
1414
1415 static void *trans_update_key(struct btree_trans *trans,
1416                               struct btree_iter *iter,
1417                               unsigned u64s)
1418 {
1419         struct btree_insert_entry *i;
1420         struct bkey_i *new_k;
1421
1422         new_k = bch2_trans_kmalloc(trans, u64s * sizeof(u64));
1423         if (IS_ERR(new_k))
1424                 return new_k;
1425
1426         bkey_init(&new_k->k);
1427         new_k->k.p = iter->pos;
1428
1429         trans_for_each_update(trans, i)
1430                 if (i->iter == iter) {
1431                         i->k = new_k;
1432                         return new_k;
1433                 }
1434
1435         bch2_trans_update(trans, iter, new_k);
1436         return new_k;
1437 }
1438
1439 static int bch2_trans_mark_pointer(struct btree_trans *trans,
1440                         struct extent_ptr_decoded p,
1441                         s64 sectors, enum bch_data_type data_type)
1442 {
1443         struct bch_fs *c = trans->c;
1444         struct bch_dev *ca = bch_dev_bkey_exists(c, p.ptr.dev);
1445         struct btree_iter *iter;
1446         struct bkey_s_c k;
1447         struct bkey_alloc_unpacked u;
1448         struct bkey_i_alloc *a;
1449         unsigned old;
1450         bool overflow;
1451         int ret;
1452
1453         ret = trans_get_key(trans, BTREE_ID_ALLOC,
1454                             POS(p.ptr.dev, PTR_BUCKET_NR(ca, &p.ptr)),
1455                             &iter, &k);
1456         if (ret < 0)
1457                 return ret;
1458
1459         if (!ret) {
1460                 /*
1461                  * During journal replay, and if gc repairs alloc info at
1462                  * runtime, the alloc info in the btree might not be up to date
1463                  * yet - so, trust the in memory mark:
1464                  */
1465                 struct bucket *g;
1466                 struct bucket_mark m;
1467
1468                 percpu_down_read(&c->mark_lock);
1469                 g       = bucket(ca, iter->pos.offset);
1470                 m       = READ_ONCE(g->mark);
1471                 u       = alloc_mem_to_key(g, m);
1472                 percpu_up_read(&c->mark_lock);
1473         } else {
1474                 /*
1475                  * The key came from this transaction's own updates - use it:
1476                  */
1477                 if (k.k->type != KEY_TYPE_alloc) {
1478                         bch2_fsck_err(c, FSCK_CAN_IGNORE|FSCK_NEED_FSCK,
1479                                       "pointer to nonexistent bucket %llu:%llu",
1480                                       iter->pos.inode, iter->pos.offset);
1481                         ret = -1;
1482                         goto out;
1483                 }
1484
1485                 u = bch2_alloc_unpack(k);
1486         }
1487
1488         if (gen_after(u.gen, p.ptr.gen)) {
1489                 ret = 1;
1490                 goto out;
1491         }
1492
1493         if (u.data_type && u.data_type != data_type) {
1494                 bch2_fsck_err(c, FSCK_CAN_IGNORE|FSCK_NEED_FSCK,
1495                         "bucket %llu:%llu gen %u different types of data in same bucket: %s, %s",
1496                         iter->pos.inode, iter->pos.offset,
1497                         u.gen,
1498                         bch2_data_types[u.data_type],
1499                         bch2_data_types[data_type]);
1500                 ret = -1;
1501                 goto out;
1502         }
1503
1504         if (!p.ptr.cached) {
1505                 old = u.dirty_sectors;
1506                 overflow = checked_add(u.dirty_sectors, sectors);
1507         } else {
1508                 old = u.cached_sectors;
1509                 overflow = checked_add(u.cached_sectors, sectors);
1510         }
1511
1512         u.data_type = u.dirty_sectors || u.cached_sectors
1513                 ? data_type : 0;
1514
1515         bch2_fs_inconsistent_on(overflow, c,
1516                 "bucket sector count overflow: %u + %lli > U16_MAX",
1517                 old, sectors);
1518         BUG_ON(overflow);
1519
1520         a = trans_update_key(trans, iter, BKEY_ALLOC_U64s_MAX);
1521         ret = PTR_ERR_OR_ZERO(a);
1522         if (ret)
1523                 goto out;
1524
1525         bkey_alloc_init(&a->k_i);
1526         a->k.p = iter->pos;
1527         bch2_alloc_pack(a, u);
1528 out:
1529         bch2_trans_iter_put(trans, iter);
1530         return ret;
1531 }
1532
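     /*
      * Account @sectors to the stripe referenced by an erasure coded pointer:
      * the stripe key's per-block sector count is updated in the transaction,
      * and the stripe's replicas entry and data/parity block counts are
      * returned via @r, @nr_data and @nr_parity so the caller can also charge
      * the parity overhead.
      */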
1533 static int bch2_trans_mark_stripe_ptr(struct btree_trans *trans,
1534                         struct bch_extent_stripe_ptr p,
1535                         s64 sectors, enum bch_data_type data_type,
1536                         struct bch_replicas_padded *r,
1537                         unsigned *nr_data,
1538                         unsigned *nr_parity)
1539 {
1540         struct bch_fs *c = trans->c;
1541         struct btree_iter *iter;
1542         struct bkey_i *new_k;
1543         struct bkey_s_c k;
1544         struct bkey_s_stripe s;
1545         int ret = 0;
1546
1547         ret = trans_get_key(trans, BTREE_ID_EC, POS(0, p.idx), &iter, &k);
1548         if (ret < 0)
1549                 return ret;
1550
1551         if (k.k->type != KEY_TYPE_stripe) {
1552                 bch2_fs_inconsistent(c,
1553                         "pointer to nonexistent stripe %llu",
1554                         (u64) p.idx);
1555                 ret = -EIO;
1556                 goto out;
1557         }
1558
1559         new_k = trans_update_key(trans, iter, k.k->u64s);
1560         ret = PTR_ERR_OR_ZERO(new_k);
1561         if (ret)
1562                 goto out;
1563
1564         bkey_reassemble(new_k, k);
1565         s = bkey_i_to_s_stripe(new_k);
1566
1567         stripe_blockcount_set(s.v, p.block,
1568                 stripe_blockcount_get(s.v, p.block) +
1569                 sectors);
1570
1571         *nr_data        = s.v->nr_blocks - s.v->nr_redundant;
1572         *nr_parity      = s.v->nr_redundant;
1573         bch2_bkey_to_replicas(&r->e, s.s_c);
1574 out:
1575         bch2_trans_iter_put(trans, iter);
1576         return ret;
1577 }
1578
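     /*
      * Mark each pointer of an extent (or btree node pointer): every bucket is
      * updated via bch2_trans_mark_pointer(); non-stale cached pointers are
      * accounted as cached sectors, while dirty and erasure coded pointers
      * accumulate into replicas entries queued with update_replicas_list().
      */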
1579 static int bch2_trans_mark_extent(struct btree_trans *trans,
1580                         struct bkey_s_c k, unsigned offset,
1581                         s64 sectors, unsigned flags,
1582                         enum bch_data_type data_type)
1583 {
1584         struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
1585         const union bch_extent_entry *entry;
1586         struct extent_ptr_decoded p;
1587         struct bch_replicas_padded r;
1588         s64 dirty_sectors = 0;
1589         bool stale;
1590         int ret;
1591
1592         r.e.data_type   = data_type;
1593         r.e.nr_devs     = 0;
1594         r.e.nr_required = 1;
1595
1596         BUG_ON(!sectors);
1597
1598         bkey_for_each_ptr_decode(k.k, ptrs, p, entry) {
1599                 s64 disk_sectors = data_type == BCH_DATA_BTREE
1600                         ? sectors
1601                         : ptr_disk_sectors_delta(p, offset, sectors, flags);
1602
1603                 ret = bch2_trans_mark_pointer(trans, p, disk_sectors,
1604                                               data_type);
1605                 if (ret < 0)
1606                         return ret;
1607
1608                 stale = ret > 0;
1609
1610                 if (p.ptr.cached) {
1611                         if (!stale)
1612                                 update_cached_sectors_list(trans, p.ptr.dev,
1613                                                            disk_sectors);
1614                 } else if (!p.has_ec) {
1615                         dirty_sectors          += disk_sectors;
1616                         r.e.devs[r.e.nr_devs++] = p.ptr.dev;
1617                 } else {
1618                         struct bch_replicas_padded ec_r;
1619                         unsigned nr_data, nr_parity;
1620                         s64 parity_sectors;
1621
1622                         ret = bch2_trans_mark_stripe_ptr(trans, p.ec,
1623                                         disk_sectors, data_type,
1624                                         &ec_r, &nr_data, &nr_parity);
1625                         if (ret)
1626                                 return ret;
1627
1628                         parity_sectors =
1629                                 __ptr_disk_sectors_delta(p.crc.live_size,
1630                                         offset, sectors, flags,
1631                                         p.crc.compressed_size * nr_parity,
1632                                         p.crc.uncompressed_size * nr_data);
1633
1634                         update_replicas_list(trans, &ec_r.e,
1635                                              disk_sectors + parity_sectors);
1636
1637                         r.e.nr_required = 0;
1638                 }
1639         }
1640
1641         if (r.e.nr_devs)
1642                 update_replicas_list(trans, &r.e, dirty_sectors);
1643
1644         return 0;
1645 }
1646
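     /*
      * Update the refcount of the indirect extent at @idx referenced by @p:
      * incremented when a new reference is added, decremented when one is
      * dropped; the key is deleted when the refcount reaches zero.
      *
      * Returns the number of sectors from @idx to the end of the indirect
      * extent, so the caller can advance to the next one, or a negative error
      * code.
      */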
1647 static int __bch2_trans_mark_reflink_p(struct btree_trans *trans,
1648                         struct bkey_s_c_reflink_p p,
1649                         u64 idx, unsigned sectors,
1650                         unsigned flags)
1651 {
1652         struct bch_fs *c = trans->c;
1653         struct btree_iter *iter;
1654         struct bkey_i *new_k;
1655         struct bkey_s_c k;
1656         struct bkey_i_reflink_v *r_v;
1657         s64 ret;
1658
1659         ret = trans_get_key(trans, BTREE_ID_REFLINK,
1660                             POS(0, idx), &iter, &k);
1661         if (ret < 0)
1662                 return ret;
1663
1664         if (k.k->type != KEY_TYPE_reflink_v) {
1665                 bch2_fs_inconsistent(c,
1666                         "%llu:%llu len %u points to nonexistent indirect extent %llu",
1667                         p.k->p.inode, p.k->p.offset, p.k->size, idx);
1668                 ret = -EIO;
1669                 goto err;
1670         }
1671
1672         if ((flags & BCH_BUCKET_MARK_OVERWRITE) &&
1673             (bkey_start_offset(k.k) < idx ||
1674              k.k->p.offset > idx + sectors))
1675                 goto out;
1676
1677         bch2_btree_iter_set_pos(iter, bkey_start_pos(k.k));
1678         BUG_ON(iter->uptodate > BTREE_ITER_NEED_PEEK);
1679
1680         new_k = trans_update_key(trans, iter, k.k->u64s);
1681         ret = PTR_ERR_OR_ZERO(new_k);
1682         if (ret)
1683                 goto err;
1684
1685         bkey_reassemble(new_k, k);
1686         r_v = bkey_i_to_reflink_v(new_k);
1687
1688         le64_add_cpu(&r_v->v.refcount,
1689                      !(flags & BCH_BUCKET_MARK_OVERWRITE) ? 1 : -1);
1690
1691         if (!r_v->v.refcount) {
1692                 r_v->k.type = KEY_TYPE_deleted;
1693                 set_bkey_val_u64s(&r_v->k, 0);
1694         }
1695 out:
1696         ret = k.k->p.offset - idx;
1697 err:
1698         bch2_trans_iter_put(trans, iter);
1699         return ret;
1700 }
1701
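     /*
      * Walk the range referenced by a reflink pointer, adjusting the refcount
      * of each indirect extent it covers, one extent at a time.
      */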
1702 static int bch2_trans_mark_reflink_p(struct btree_trans *trans,
1703                         struct bkey_s_c_reflink_p p, unsigned offset,
1704                         s64 sectors, unsigned flags)
1705 {
1706         u64 idx = le64_to_cpu(p.v->idx) + offset;
1707         s64 ret = 0;
1708
1709         sectors = abs(sectors);
1710         BUG_ON(offset + sectors > p.k->size);
1711
1712         while (sectors) {
1713                 ret = __bch2_trans_mark_reflink_p(trans, p, idx, sectors, flags);
1714                 if (ret < 0)
1715                         break;
1716
1717                 idx += ret;
1718                 sectors = max_t(s64, 0LL, sectors - ret);
1719                 ret = 0;
1720         }
1721
1722         return ret;
1723 }
1724
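     /*
      * Transactional counterpart to bch2_mark_key(): account the disk space
      * and replicas changes for @k as btree updates and replica deltas queued
      * in the transaction, instead of updating in memory bucket marks
      * directly.
      */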
1725 int bch2_trans_mark_key(struct btree_trans *trans, struct bkey_s_c k,
1726                         unsigned offset, s64 sectors, unsigned flags)
1727 {
1728         struct replicas_delta_list *d;
1729         struct bch_fs *c = trans->c;
1730
1731         switch (k.k->type) {
1732         case KEY_TYPE_btree_ptr:
1733                 sectors = !(flags & BCH_BUCKET_MARK_OVERWRITE)
1734                         ?  c->opts.btree_node_size
1735                         : -c->opts.btree_node_size;
1736
1737                 return bch2_trans_mark_extent(trans, k, offset, sectors,
1738                                               flags, BCH_DATA_BTREE);
1739         case KEY_TYPE_extent:
1740         case KEY_TYPE_reflink_v:
1741                 return bch2_trans_mark_extent(trans, k, offset, sectors,
1742                                               flags, BCH_DATA_USER);
1743         case KEY_TYPE_inode:
1744                 d = replicas_deltas_realloc(trans, 0);
1745
1746                 if (!(flags & BCH_BUCKET_MARK_OVERWRITE))
1747                         d->nr_inodes++;
1748                 else
1749                         d->nr_inodes--;
1750                 return 0;
1751         case KEY_TYPE_reservation: {
1752                 unsigned replicas = bkey_s_c_to_reservation(k).v->nr_replicas;
1753
1754                 d = replicas_deltas_realloc(trans, 0);
1755
1756                 sectors *= replicas;
1757                 replicas = clamp_t(unsigned, replicas, 1,
1758                                    ARRAY_SIZE(d->persistent_reserved));
1759
1760                 d->persistent_reserved[replicas - 1] += sectors;
1761                 return 0;
1762         }
1763         case KEY_TYPE_reflink_p:
1764                 return bch2_trans_mark_reflink_p(trans,
1765                                         bkey_s_c_to_reflink_p(k),
1766                                         offset, sectors, flags);
1767         default:
1768                 return 0;
1769         }
1770 }
1771
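     /*
      * Mark a pending @insert along with every existing key it overwrites:
      * the new key is marked with BCH_BUCKET_MARK_INSERT, then each key it
      * overlaps is marked with BCH_BUCKET_MARK_OVERWRITE, with offset/sectors
      * describing the overwritten range for extents.
      */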
1772 int bch2_trans_mark_update(struct btree_trans *trans,
1773                            struct btree_iter *iter,
1774                            struct bkey_i *insert)
1775 {
1776         struct btree            *b = iter->l[0].b;
1777         struct btree_node_iter  node_iter = iter->l[0].iter;
1778         struct bkey_packed      *_k;
1779         int ret;
1780
1781         if (!btree_node_type_needs_gc(iter->btree_id))
1782                 return 0;
1783
1784         ret = bch2_trans_mark_key(trans, bkey_i_to_s_c(insert),
1785                         0, insert->k.size, BCH_BUCKET_MARK_INSERT);
1786         if (ret)
1787                 return ret;
1788
1789         if (unlikely(trans->flags & BTREE_INSERT_NOMARK_OVERWRITES))
1790                 return 0;
1791
1792         while ((_k = bch2_btree_node_iter_peek_filter(&node_iter, b,
1793                                                       KEY_TYPE_discard))) {
1794                 struct bkey             unpacked;
1795                 struct bkey_s_c         k;
1796                 unsigned                offset = 0;
1797                 s64                     sectors = 0;
1798                 unsigned                flags = BCH_BUCKET_MARK_OVERWRITE;
1799
1800                 k = bkey_disassemble(b, _k, &unpacked);
1801
1802                 if (btree_node_is_extents(b)
1803                     ? bkey_cmp(insert->k.p, bkey_start_pos(k.k)) <= 0
1804                     : bkey_cmp(insert->k.p, k.k->p))
1805                         break;
1806
1807                 if (btree_node_is_extents(b)) {
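                                  /*
                                   * offset is where the overwritten range
                                   * starts within k; sectors is the (negative)
                                   * number of k's sectors being overwritten:
                                   */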
1808                         switch (bch2_extent_overlap(&insert->k, k.k)) {
1809                         case BCH_EXTENT_OVERLAP_ALL:
1810                                 offset = 0;
1811                                 sectors = -((s64) k.k->size);
1812                                 break;
1813                         case BCH_EXTENT_OVERLAP_BACK:
1814                                 offset = bkey_start_offset(&insert->k) -
1815                                         bkey_start_offset(k.k);
1816                                 sectors = bkey_start_offset(&insert->k) -
1817                                         k.k->p.offset;
1818                                 break;
1819                         case BCH_EXTENT_OVERLAP_FRONT:
1820                                 offset = 0;
1821                                 sectors = bkey_start_offset(k.k) -
1822                                         insert->k.p.offset;
1823                                 break;
1824                         case BCH_EXTENT_OVERLAP_MIDDLE:
1825                                 offset = bkey_start_offset(&insert->k) -
1826                                         bkey_start_offset(k.k);
1827                                 sectors = -((s64) insert->k.size);
1828                                 flags |= BCH_BUCKET_MARK_OVERWRITE_SPLIT;
1829                                 break;
1830                         }
1831
1832                         BUG_ON(sectors >= 0);
1833                 }
1834
1835                 ret = bch2_trans_mark_key(trans, k, offset, sectors, flags);
1836                 if (ret)
1837                         return ret;
1838
1839                 bch2_btree_node_iter_advance(&node_iter, b);
1840         }
1841
1842         return 0;
1843 }
1844
1845 /* Disk reservations: */
1846
1847 #define SECTORS_CACHE   1024
1848
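     /*
      * Reserve @sectors of disk space: the fast path takes them from the
      * per-cpu sectors_available cache, which is refilled from
      * c->sectors_available in batches of up to SECTORS_CACHE extra sectors.
      * If the global counter runs low we fall back to recalculating free space
      * under sectors_available_lock, failing with -ENOSPC unless
      * BCH_DISK_RESERVATION_NOFAIL is set.
      */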
1849 int bch2_disk_reservation_add(struct bch_fs *c, struct disk_reservation *res,
1850                               unsigned sectors, int flags)
1851 {
1852         struct bch_fs_pcpu *pcpu;
1853         u64 old, v, get;
1854         s64 sectors_available;
1855         int ret;
1856
1857         percpu_down_read(&c->mark_lock);
1858         preempt_disable();
1859         pcpu = this_cpu_ptr(c->pcpu);
1860
1861         if (sectors <= pcpu->sectors_available)
1862                 goto out;
1863
1864         v = atomic64_read(&c->sectors_available);
1865         do {
1866                 old = v;
1867                 get = min((u64) sectors + SECTORS_CACHE, old);
1868
1869                 if (get < sectors) {
1870                         preempt_enable();
1871                         goto recalculate;
1872                 }
1873         } while ((v = atomic64_cmpxchg(&c->sectors_available,
1874                                        old, old - get)) != old);
1875
1876         pcpu->sectors_available         += get;
1877
1878 out:
1879         pcpu->sectors_available         -= sectors;
1880         this_cpu_add(*c->online_reserved, sectors);
1881         res->sectors                    += sectors;
1882
1883         preempt_enable();
1884         percpu_up_read(&c->mark_lock);
1885         return 0;
1886
1887 recalculate:
1888         mutex_lock(&c->sectors_available_lock);
1889
1890         percpu_u64_set(&c->pcpu->sectors_available, 0);
1891         sectors_available = avail_factor(__bch2_fs_usage_read_short(c).free);
1892
1893         if (sectors <= sectors_available ||
1894             (flags & BCH_DISK_RESERVATION_NOFAIL)) {
1895                 atomic64_set(&c->sectors_available,
1896                              max_t(s64, 0, sectors_available - sectors));
1897                 this_cpu_add(*c->online_reserved, sectors);
1898                 res->sectors                    += sectors;
1899                 ret = 0;
1900         } else {
1901                 atomic64_set(&c->sectors_available, sectors_available);
1902                 ret = -ENOSPC;
1903         }
1904
1905         mutex_unlock(&c->sectors_available_lock);
1906         percpu_up_read(&c->mark_lock);
1907
1908         return ret;
1909 }
1910
1911 /* Startup/shutdown: */
1912
1913 static void buckets_free_rcu(struct rcu_head *rcu)
1914 {
1915         struct bucket_array *buckets =
1916                 container_of(rcu, struct bucket_array, rcu);
1917
1918         kvpfree(buckets,
1919                 sizeof(struct bucket_array) +
1920                 buckets->nbuckets * sizeof(struct bucket));
1921 }
1922
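     /*
      * Allocate bucket arrays, freelists and heaps sized for @nbuckets, and
      * swap them in for the device's current ones. Copygc is stopped while the
      * swap happens; if the device already had buckets the existing contents
      * are copied across under gc_lock, bucket_lock and mark_lock, and the old
      * bucket array is freed via RCU once readers are done with it.
      */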
1923 int bch2_dev_buckets_resize(struct bch_fs *c, struct bch_dev *ca, u64 nbuckets)
1924 {
1925         struct bucket_array *buckets = NULL, *old_buckets = NULL;
1926         unsigned long *buckets_nouse = NULL;
1927         alloc_fifo      free[RESERVE_NR];
1928         alloc_fifo      free_inc;
1929         alloc_heap      alloc_heap;
1930         copygc_heap     copygc_heap;
1931
1932         size_t btree_reserve    = DIV_ROUND_UP(BTREE_NODE_RESERVE,
1933                              ca->mi.bucket_size / c->opts.btree_node_size);
1934         /* XXX: these should be tunable */
1935         size_t reserve_none     = max_t(size_t, 1, nbuckets >> 9);
1936         size_t copygc_reserve   = max_t(size_t, 2, nbuckets >> 7);
1937         size_t free_inc_nr      = max(max_t(size_t, 1, nbuckets >> 12),
1938                                       btree_reserve * 2);
1939         bool resize = ca->buckets[0] != NULL,
1940              start_copygc = ca->copygc_thread != NULL;
1941         int ret = -ENOMEM;
1942         unsigned i;
1943
1944         memset(&free,           0, sizeof(free));
1945         memset(&free_inc,       0, sizeof(free_inc));
1946         memset(&alloc_heap,     0, sizeof(alloc_heap));
1947         memset(&copygc_heap,    0, sizeof(copygc_heap));
1948
1949         if (!(buckets           = kvpmalloc(sizeof(struct bucket_array) +
1950                                             nbuckets * sizeof(struct bucket),
1951                                             GFP_KERNEL|__GFP_ZERO)) ||
1952             !(buckets_nouse     = kvpmalloc(BITS_TO_LONGS(nbuckets) *
1953                                             sizeof(unsigned long),
1954                                             GFP_KERNEL|__GFP_ZERO)) ||
1955             !init_fifo(&free[RESERVE_BTREE], btree_reserve, GFP_KERNEL) ||
1956             !init_fifo(&free[RESERVE_MOVINGGC],
1957                        copygc_reserve, GFP_KERNEL) ||
1958             !init_fifo(&free[RESERVE_NONE], reserve_none, GFP_KERNEL) ||
1959             !init_fifo(&free_inc,       free_inc_nr, GFP_KERNEL) ||
1960             !init_heap(&alloc_heap,     ALLOC_SCAN_BATCH(ca) << 1, GFP_KERNEL) ||
1961             !init_heap(&copygc_heap,    copygc_reserve, GFP_KERNEL))
1962                 goto err;
1963
1964         buckets->first_bucket   = ca->mi.first_bucket;
1965         buckets->nbuckets       = nbuckets;
1966
1967         bch2_copygc_stop(ca);
1968
1969         if (resize) {
1970                 down_write(&c->gc_lock);
1971                 down_write(&ca->bucket_lock);
1972                 percpu_down_write(&c->mark_lock);
1973         }
1974
1975         old_buckets = bucket_array(ca);
1976
1977         if (resize) {
1978                 size_t n = min(buckets->nbuckets, old_buckets->nbuckets);
1979
1980                 memcpy(buckets->b,
1981                        old_buckets->b,
1982                        n * sizeof(struct bucket));
1983                 memcpy(buckets_nouse,
1984                        ca->buckets_nouse,
1985                        BITS_TO_LONGS(n) * sizeof(unsigned long));
1986         }
1987
1988         rcu_assign_pointer(ca->buckets[0], buckets);
1989         buckets = old_buckets;
1990
1991         swap(ca->buckets_nouse, buckets_nouse);
1992
1993         if (resize)
1994                 percpu_up_write(&c->mark_lock);
1995
1996         spin_lock(&c->freelist_lock);
1997         for (i = 0; i < RESERVE_NR; i++) {
1998                 fifo_move(&free[i], &ca->free[i]);
1999                 swap(ca->free[i], free[i]);
2000         }
2001         fifo_move(&free_inc, &ca->free_inc);
2002         swap(ca->free_inc, free_inc);
2003         spin_unlock(&c->freelist_lock);
2004
2005         /* with gc lock held, alloc_heap can't be in use: */
2006         swap(ca->alloc_heap, alloc_heap);
2007
2008         /* and we shut down copygc: */
2009         swap(ca->copygc_heap, copygc_heap);
2010
2011         nbuckets = ca->mi.nbuckets;
2012
2013         if (resize) {
2014                 up_write(&ca->bucket_lock);
2015                 up_write(&c->gc_lock);
2016         }
2017
2018         if (start_copygc &&
2019             bch2_copygc_start(c, ca))
2020                 bch_err(ca, "error restarting copygc thread");
2021
2022         ret = 0;
2023 err:
2024         free_heap(&copygc_heap);
2025         free_heap(&alloc_heap);
2026         free_fifo(&free_inc);
2027         for (i = 0; i < RESERVE_NR; i++)
2028                 free_fifo(&free[i]);
2029         kvpfree(buckets_nouse,
2030                 BITS_TO_LONGS(nbuckets) * sizeof(unsigned long));
2031         if (buckets)
2032                 call_rcu(&buckets->rcu, buckets_free_rcu);
2033
2034         return ret;
2035 }
2036
2037 void bch2_dev_buckets_free(struct bch_dev *ca)
2038 {
2039         unsigned i;
2040
2041         free_heap(&ca->copygc_heap);
2042         free_heap(&ca->alloc_heap);
2043         free_fifo(&ca->free_inc);
2044         for (i = 0; i < RESERVE_NR; i++)
2045                 free_fifo(&ca->free[i]);
2046         kvpfree(ca->buckets_nouse,
2047                 BITS_TO_LONGS(ca->mi.nbuckets) * sizeof(unsigned long));
2048         kvpfree(rcu_dereference_protected(ca->buckets[0], 1),
2049                 sizeof(struct bucket_array) +
2050                 ca->mi.nbuckets * sizeof(struct bucket));
2051
2052         free_percpu(ca->usage[0]);
2053 }
2054
2055 int bch2_dev_buckets_alloc(struct bch_fs *c, struct bch_dev *ca)
2056 {
2057         if (!(ca->usage[0] = alloc_percpu(struct bch_dev_usage)))
2058                 return -ENOMEM;
2059
2060         return bch2_dev_buckets_resize(c, ca, ca->mi.nbuckets);
2061 }