bcachefs: Btree locking fix, refactoring
[linux-block.git] / fs / bcachefs / extents.c
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright (C) 2010 Kent Overstreet <kent.overstreet@gmail.com>
4  *
5  * Code for managing the extent btree and dynamically updating the writeback
6  * dirty sector count.
7  */
8
9 #include "bcachefs.h"
10 #include "bkey_methods.h"
11 #include "btree_gc.h"
12 #include "btree_update.h"
13 #include "btree_update_interior.h"
14 #include "buckets.h"
15 #include "checksum.h"
16 #include "debug.h"
17 #include "dirent.h"
18 #include "disk_groups.h"
19 #include "error.h"
20 #include "extents.h"
21 #include "inode.h"
22 #include "journal.h"
23 #include "replicas.h"
24 #include "super.h"
25 #include "super-io.h"
26 #include "trace.h"
27 #include "util.h"
28 #include "xattr.h"
29
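/*
 * Advance iterator set @i to its next key; if the set is exhausted, drop it
 * from the heap by replacing it with the heap's last set.
 */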
30 static void sort_key_next(struct btree_node_iter_large *iter,
31                           struct btree *b,
32                           struct btree_node_iter_set *i)
33 {
34         i->k += __btree_node_offset_to_key(b, i->k)->u64s;
35
36         if (i->k == i->end)
37                 *i = iter->data[--iter->used];
38 }
39
40 /*
41  * Returns true if l > r - unless l == r, in which case returns true if l is
42  * older than r.
43  *
44  * Necessary for btree_sort_fixup() - if there are multiple keys that compare
45  * equal in different sets, we have to process them newest to oldest.
46  */
47 #define key_sort_cmp(h, l, r)                                           \
48 ({                                                                      \
49         bkey_cmp_packed(b,                                              \
50                         __btree_node_offset_to_key(b, (l).k),           \
51                         __btree_node_offset_to_key(b, (r).k))           \
52                                                                         \
53         ?: (l).k - (r).k;                                               \
54 })
55
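/*
 * Returns true if the key at the front of @iter should be dropped: whiteouts
 * are always dropped, and when the front key compares equal to the next key
 * in heap order it is the older duplicate and is dropped as well.
 */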
56 static inline bool should_drop_next_key(struct btree_node_iter_large *iter,
57                                         struct btree *b)
58 {
59         struct btree_node_iter_set *l = iter->data, *r = iter->data + 1;
60         struct bkey_packed *k = __btree_node_offset_to_key(b, l->k);
61
62         if (bkey_whiteout(k))
63                 return true;
64
65         if (iter->used < 2)
66                 return false;
67
68         if (iter->used > 2 &&
69             key_sort_cmp(iter, r[0], r[1]) >= 0)
70                 r++;
71
72         /*
73          * key_sort_cmp() ensures that when keys compare equal the older key
74          * comes first; so if l->k compares equal to r->k then l->k is older and
75          * should be dropped.
76          */
77         return !bkey_cmp_packed(b,
78                                 __btree_node_offset_to_key(b, l->k),
79                                 __btree_node_offset_to_key(b, r->k));
80 }
81
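/*
 * Merge the keys from all the bsets covered by @iter into @dst in sorted
 * order, dropping whiteouts and older duplicates; returns accounting for the
 * keys that were kept.
 */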
82 struct btree_nr_keys bch2_key_sort_fix_overlapping(struct bset *dst,
83                                         struct btree *b,
84                                         struct btree_node_iter_large *iter)
85 {
86         struct bkey_packed *out = dst->start;
87         struct btree_nr_keys nr;
88
89         memset(&nr, 0, sizeof(nr));
90
91         heap_resort(iter, key_sort_cmp, NULL);
92
93         while (!bch2_btree_node_iter_large_end(iter)) {
94                 if (!should_drop_next_key(iter, b)) {
95                         struct bkey_packed *k =
96                                 __btree_node_offset_to_key(b, iter->data->k);
97
98                         bkey_copy(out, k);
99                         btree_keys_account_key_add(&nr, 0, out);
100                         out = bkey_next(out);
101                 }
102
103                 sort_key_next(iter, b, iter->data);
104                 heap_sift_down(iter, 0, key_sort_cmp, NULL);
105         }
106
107         dst->u64s = cpu_to_le16((u64 *) out - dst->_data);
108         return nr;
109 }
110
111 /* Common among btree and extent ptrs */
112
113 const struct bch_extent_ptr *
114 bch2_extent_has_device(struct bkey_s_c_extent e, unsigned dev)
115 {
116         const struct bch_extent_ptr *ptr;
117
118         extent_for_each_ptr(e, ptr)
119                 if (ptr->dev == dev)
120                         return ptr;
121
122         return NULL;
123 }
124
125 void bch2_extent_drop_device(struct bkey_s_extent e, unsigned dev)
126 {
127         struct bch_extent_ptr *ptr;
128
129         bch2_extent_drop_ptrs(e, ptr, ptr->dev == dev);
130 }
131
132 const struct bch_extent_ptr *
133 bch2_extent_has_group(struct bch_fs *c, struct bkey_s_c_extent e, unsigned group)
134 {
135         const struct bch_extent_ptr *ptr;
136
137         extent_for_each_ptr(e, ptr) {
138                 struct bch_dev *ca = bch_dev_bkey_exists(c, ptr->dev);
139
140                 if (ca->mi.group &&
141                     ca->mi.group - 1 == group)
142                         return ptr;
143         }
144
145         return NULL;
146 }
147
148 const struct bch_extent_ptr *
149 bch2_extent_has_target(struct bch_fs *c, struct bkey_s_c_extent e, unsigned target)
150 {
151         const struct bch_extent_ptr *ptr;
152
153         extent_for_each_ptr(e, ptr)
154                 if (bch2_dev_in_target(c, ptr->dev, target) &&
155                     (!ptr->cached ||
156                      !ptr_stale(bch_dev_bkey_exists(c, ptr->dev), ptr)))
157                         return ptr;
158
159         return NULL;
160 }
161
162 unsigned bch2_extent_nr_ptrs(struct bkey_s_c_extent e)
163 {
164         const struct bch_extent_ptr *ptr;
165         unsigned nr_ptrs = 0;
166
167         extent_for_each_ptr(e, ptr)
168                 nr_ptrs++;
169
170         return nr_ptrs;
171 }
172
173 unsigned bch2_extent_nr_dirty_ptrs(struct bkey_s_c k)
174 {
175         struct bkey_s_c_extent e;
176         const struct bch_extent_ptr *ptr;
177         unsigned nr_ptrs = 0;
178
179         switch (k.k->type) {
180         case BCH_EXTENT:
181         case BCH_EXTENT_CACHED:
182                 e = bkey_s_c_to_extent(k);
183
184                 extent_for_each_ptr(e, ptr)
185                         nr_ptrs += !ptr->cached;
186                 break;
187
188         case BCH_RESERVATION:
189                 nr_ptrs = bkey_s_c_to_reservation(k).v->nr_replicas;
190                 break;
191         }
192
193         return nr_ptrs;
194 }
195
196 static unsigned bch2_extent_ptr_durability(struct bch_fs *c,
197                                            struct extent_ptr_decoded p)
198 {
199         unsigned i, durability = 0;
200         struct bch_dev *ca;
201
202         if (p.ptr.cached)
203                 return 0;
204
205         ca = bch_dev_bkey_exists(c, p.ptr.dev);
206
207         if (ca->mi.state != BCH_MEMBER_STATE_FAILED)
208                 durability = max_t(unsigned, durability, ca->mi.durability);
209
210         for (i = 0; i < p.ec_nr; i++) {
211                 struct ec_stripe *s =
212                         genradix_ptr(&c->ec_stripes, p.idx);
213
214                 if (WARN_ON(!s))
215                         continue;
216
217                 durability = max_t(unsigned, durability, s->nr_redundant);
218         }
219
220         return durability;
221 }
222
223 unsigned bch2_extent_durability(struct bch_fs *c, struct bkey_s_c_extent e)
224 {
225         const union bch_extent_entry *entry;
226         struct extent_ptr_decoded p;
227         unsigned durability = 0;
228
229         extent_for_each_ptr_decode(e, p, entry)
230                 durability += bch2_extent_ptr_durability(c, p);
231
232         return durability;
233 }
234
235 unsigned bch2_extent_is_compressed(struct bkey_s_c k)
236 {
237         unsigned ret = 0;
238
239         switch (k.k->type) {
240         case BCH_EXTENT:
241         case BCH_EXTENT_CACHED: {
242                 struct bkey_s_c_extent e = bkey_s_c_to_extent(k);
243                 const union bch_extent_entry *entry;
244                 struct extent_ptr_decoded p;
245
246                 extent_for_each_ptr_decode(e, p, entry)
247                         if (!p.ptr.cached &&
248                             p.crc.compression_type != BCH_COMPRESSION_NONE &&
249                             p.crc.compressed_size < p.crc.live_size)
250                                 ret += p.crc.compressed_size;
251         }
252         }
253
254         return ret;
255 }
256
257 bool bch2_extent_matches_ptr(struct bch_fs *c, struct bkey_s_c_extent e,
258                              struct bch_extent_ptr m, u64 offset)
259 {
260         const union bch_extent_entry *entry;
261         struct extent_ptr_decoded p;
262
263         extent_for_each_ptr_decode(e, p, entry)
264                 if (p.ptr.dev   == m.dev &&
265                     p.ptr.gen   == m.gen &&
266                     (s64) p.ptr.offset + p.crc.offset - bkey_start_offset(e.k) ==
267                     (s64) m.offset  - offset)
268                         return true;
269
270         return false;
271 }
272
273 static union bch_extent_entry *extent_entry_prev(struct bkey_s_extent e,
274                                           union bch_extent_entry *entry)
275 {
276         union bch_extent_entry *i = e.v->start;
277
278         if (i == entry)
279                 return NULL;
280
281         while (extent_entry_next(i) != entry)
282                 i = extent_entry_next(i);
283         return i;
284 }
285
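/*
 * Remove @ptr from @e; if @ptr was the only pointer covered by the preceding
 * crc entry, that crc entry is removed as well. Returns the entry that now
 * occupies the position of the dropped range.
 */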
286 union bch_extent_entry *bch2_extent_drop_ptr(struct bkey_s_extent e,
287                                              struct bch_extent_ptr *ptr)
288 {
289         union bch_extent_entry *dst, *src, *prev;
290         bool drop_crc = true;
291
292         EBUG_ON(ptr < &e.v->start->ptr ||
293                 ptr >= &extent_entry_last(e)->ptr);
294         EBUG_ON(ptr->type != 1 << BCH_EXTENT_ENTRY_ptr);
295
296         src = extent_entry_next(to_entry(ptr));
297         if (src != extent_entry_last(e) &&
298             !extent_entry_is_crc(src))
299                 drop_crc = false;
300
301         dst = to_entry(ptr);
302         while ((prev = extent_entry_prev(e, dst))) {
303                 if (extent_entry_is_ptr(prev))
304                         break;
305
306                 if (extent_entry_is_crc(prev)) {
307                         if (drop_crc)
308                                 dst = prev;
309                         break;
310                 }
311
312                 dst = prev;
313         }
314
315         memmove_u64s_down(dst, src,
316                           (u64 *) extent_entry_last(e) - (u64 *) src);
317         e.k->u64s -= (u64 *) src - (u64 *) dst;
318
319         return dst;
320 }
321
322 static inline bool can_narrow_crc(struct bch_extent_crc_unpacked u,
323                                   struct bch_extent_crc_unpacked n)
324 {
325         return !u.compression_type &&
326                 u.csum_type &&
327                 u.uncompressed_size > u.live_size &&
328                 bch2_csum_type_is_encryption(u.csum_type) ==
329                 bch2_csum_type_is_encryption(n.csum_type);
330 }
331
332 bool bch2_can_narrow_extent_crcs(struct bkey_s_c_extent e,
333                                  struct bch_extent_crc_unpacked n)
334 {
335         struct bch_extent_crc_unpacked crc;
336         const union bch_extent_entry *i;
337
338         if (!n.csum_type)
339                 return false;
340
341         extent_for_each_crc(e, crc, i)
342                 if (can_narrow_crc(crc, n))
343                         return true;
344
345         return false;
346 }
347
348 /*
349  * We're writing another replica for this extent, so while we've got the data in
350  * memory we'll be computing a new checksum for the currently live data.
351  *
352  * If there are other replicas we aren't moving, and they are checksummed but
353  * not compressed, we can modify them to point to only the data that is
354  * currently live (so that readers won't have to bounce) while we've got the
355  * checksum we need:
356  */
357 bool bch2_extent_narrow_crcs(struct bkey_i_extent *e,
358                              struct bch_extent_crc_unpacked n)
359 {
360         struct bch_extent_crc_unpacked u;
361         struct extent_ptr_decoded p;
362         union bch_extent_entry *i;
363         bool ret = false;
364
365         /* Find a checksum entry that covers only live data: */
366         if (!n.csum_type) {
367                 extent_for_each_crc(extent_i_to_s(e), u, i)
368                         if (!u.compression_type &&
369                             u.csum_type &&
370                             u.live_size == u.uncompressed_size) {
371                                 n = u;
372                                 goto found;
373                         }
374                 return false;
375         }
376 found:
377         BUG_ON(n.compression_type);
378         BUG_ON(n.offset);
379         BUG_ON(n.live_size != e->k.size);
380
381 restart_narrow_pointers:
382         extent_for_each_ptr_decode(extent_i_to_s(e), p, i)
383                 if (can_narrow_crc(p.crc, n)) {
384                         bch2_extent_drop_ptr(extent_i_to_s(e), &i->ptr);
385                         p.ptr.offset += p.crc.offset;
386                         p.crc = n;
387                         bch2_extent_ptr_decoded_append(e, &p);
388                         ret = true;
389                         goto restart_narrow_pointers;
390                 }
391
392         return ret;
393 }
394
395 /* returns true if not equal */
396 static inline bool bch2_crc_unpacked_cmp(struct bch_extent_crc_unpacked l,
397                                          struct bch_extent_crc_unpacked r)
398 {
399         return (l.csum_type             != r.csum_type ||
400                 l.compression_type      != r.compression_type ||
401                 l.compressed_size       != r.compressed_size ||
402                 l.uncompressed_size     != r.uncompressed_size ||
403                 l.offset                != r.offset ||
404                 l.live_size             != r.live_size ||
405                 l.nonce                 != r.nonce ||
406                 bch2_crc_cmp(l.csum, r.csum));
407 }
408
409 static void bch2_extent_drop_stale(struct bch_fs *c, struct bkey_s_extent e)
410 {
411         struct bch_extent_ptr *ptr;
412
413         bch2_extent_drop_ptrs(e, ptr,
414                 ptr->cached &&
415                 ptr_stale(bch_dev_bkey_exists(c, ptr->dev), ptr));
416 }
417
418 bool bch2_ptr_normalize(struct bch_fs *c, struct btree *b, struct bkey_s k)
419 {
420         return bch2_extent_normalize(c, k);
421 }
422
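/*
 * Byteswap an extent value in place: every u64 of the value is swapped, then
 * the checksum fields of each crc entry are re-swapped so they end up in the
 * correct byte order.
 */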
423 void bch2_ptr_swab(const struct bkey_format *f, struct bkey_packed *k)
424 {
425         switch (k->type) {
426         case BCH_EXTENT:
427         case BCH_EXTENT_CACHED: {
428                 union bch_extent_entry *entry;
429                 u64 *d = (u64 *) bkeyp_val(f, k);
430                 unsigned i;
431
432                 for (i = 0; i < bkeyp_val_u64s(f, k); i++)
433                         d[i] = swab64(d[i]);
434
435                 for (entry = (union bch_extent_entry *) d;
436                      entry < (union bch_extent_entry *) (d + bkeyp_val_u64s(f, k));
437                      entry = extent_entry_next(entry)) {
438                         switch (extent_entry_type(entry)) {
439                         case BCH_EXTENT_ENTRY_ptr:
440                                 break;
441                         case BCH_EXTENT_ENTRY_crc32:
442                                 entry->crc32.csum = swab32(entry->crc32.csum);
443                                 break;
444                         case BCH_EXTENT_ENTRY_crc64:
445                                 entry->crc64.csum_hi = swab16(entry->crc64.csum_hi);
446                                 entry->crc64.csum_lo = swab64(entry->crc64.csum_lo);
447                                 break;
448                         case BCH_EXTENT_ENTRY_crc128:
449                                 entry->crc128.csum.hi = (__force __le64)
450                                         swab64((__force u64) entry->crc128.csum.hi);
451                                 entry->crc128.csum.lo = (__force __le64)
452                                         swab64((__force u64) entry->crc128.csum.lo);
453                                 break;
454                         case BCH_EXTENT_ENTRY_stripe_ptr:
455                                 break;
456                         }
457                 }
458                 break;
459         }
460         }
461 }
462
463 static const char *extent_ptr_invalid(const struct bch_fs *c,
464                                       struct bkey_s_c_extent e,
465                                       const struct bch_extent_ptr *ptr,
466                                       unsigned size_ondisk,
467                                       bool metadata)
468 {
469         const struct bch_extent_ptr *ptr2;
470         struct bch_dev *ca;
471
472         if (ptr->dev >= c->sb.nr_devices ||
473             !c->devs[ptr->dev])
474                 return "pointer to invalid device";
475
476         ca = bch_dev_bkey_exists(c, ptr->dev);
477         if (!ca)
478                 return "pointer to invalid device";
479
480         extent_for_each_ptr(e, ptr2)
481                 if (ptr != ptr2 && ptr->dev == ptr2->dev)
482                         return "multiple pointers to same device";
483
484         if (ptr->offset + size_ondisk > bucket_to_sector(ca, ca->mi.nbuckets))
485                 return "offset past end of device";
486
487         if (ptr->offset < bucket_to_sector(ca, ca->mi.first_bucket))
488                 return "offset before first bucket";
489
490         if (bucket_remainder(ca, ptr->offset) +
491             size_ondisk > ca->mi.bucket_size)
492                 return "spans multiple buckets";
493
494         return NULL;
495 }
496
497 static void extent_print_ptrs(struct printbuf *out, struct bch_fs *c,
498                               struct bkey_s_c_extent e)
499 {
500         const union bch_extent_entry *entry;
501         struct bch_extent_crc_unpacked crc;
502         const struct bch_extent_ptr *ptr;
503         const struct bch_extent_stripe_ptr *ec;
504         struct bch_dev *ca;
505         bool first = true;
506
507         extent_for_each_entry(e, entry) {
508                 if (!first)
509                         pr_buf(out, " ");
510
511                 switch (__extent_entry_type(entry)) {
512                 case BCH_EXTENT_ENTRY_ptr:
513                         ptr = entry_to_ptr(entry);
514                         ca = ptr->dev < c->sb.nr_devices && c->devs[ptr->dev]
515                                 ? bch_dev_bkey_exists(c, ptr->dev)
516                                 : NULL;
517
518                         pr_buf(out, "ptr: %u:%llu gen %u%s%s", ptr->dev,
519                                (u64) ptr->offset, ptr->gen,
520                                ptr->cached ? " cached" : "",
521                                ca && ptr_stale(ca, ptr)
522                                ? " stale" : "");
523                         break;
524                 case BCH_EXTENT_ENTRY_crc32:
525                 case BCH_EXTENT_ENTRY_crc64:
526                 case BCH_EXTENT_ENTRY_crc128:
527                         crc = bch2_extent_crc_unpack(e.k, entry_to_crc(entry));
528
529                         pr_buf(out, "crc: c_size %u size %u offset %u nonce %u csum %u compress %u",
530                                crc.compressed_size,
531                                crc.uncompressed_size,
532                                crc.offset, crc.nonce,
533                                crc.csum_type,
534                                crc.compression_type);
535                         break;
536                 case BCH_EXTENT_ENTRY_stripe_ptr:
537                         ec = &entry->stripe_ptr;
538
539                         pr_buf(out, "ec: idx %llu block %u",
540                                (u64) ec->idx, ec->block);
541                         break;
542                 default:
543                         pr_buf(out, "(invalid extent entry %.16llx)", *((u64 *) entry));
544                         goto out;
545                 }
546
547                 first = false;
548         }
549 out:
550         if (bkey_extent_is_cached(e.k))
551                 pr_buf(out, " cached");
552 }
553
554 static struct bch_dev_io_failures *dev_io_failures(struct bch_io_failures *f,
555                                                    unsigned dev)
556 {
557         struct bch_dev_io_failures *i;
558
559         for (i = f->devs; i < f->devs + f->nr; i++)
560                 if (i->dev == dev)
561                         return i;
562
563         return NULL;
564 }
565
566 void bch2_mark_io_failure(struct bch_io_failures *failed,
567                           struct extent_ptr_decoded *p)
568 {
569         struct bch_dev_io_failures *f = dev_io_failures(failed, p->ptr.dev);
570
571         if (!f) {
572                 BUG_ON(failed->nr >= ARRAY_SIZE(failed->devs));
573
574                 f = &failed->devs[failed->nr++];
575                 f->dev          = p->ptr.dev;
576                 f->idx          = p->idx;
577                 f->nr_failed    = 1;
578                 f->nr_retries   = 0;
579         } else if (p->idx != f->idx) {
580                 f->idx          = p->idx;
581                 f->nr_failed    = 1;
582                 f->nr_retries   = 0;
583         } else {
584                 f->nr_failed++;
585         }
586 }
587
588 /*
589  * returns true if p1 is better than p2:
590  */
591 static inline bool ptr_better(struct bch_fs *c,
592                               const struct extent_ptr_decoded p1,
593                               const struct extent_ptr_decoded p2)
594 {
595         if (likely(!p1.idx && !p2.idx)) {
596                 struct bch_dev *dev1 = bch_dev_bkey_exists(c, p1.ptr.dev);
597                 struct bch_dev *dev2 = bch_dev_bkey_exists(c, p2.ptr.dev);
598
599                 u64 l1 = atomic64_read(&dev1->cur_latency[READ]);
600                 u64 l2 = atomic64_read(&dev2->cur_latency[READ]);
601
602                 /* Pick at random, biased in favor of the faster device: */
603
604                 return bch2_rand_range(l1 + l2) > l1;
605         }
606
607         if (force_reconstruct_read(c))
608                 return p1.idx > p2.idx;
609
610         return p1.idx < p2.idx;
611 }
612
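/*
 * Pick a pointer to read from: stale cached pointers are skipped, previously
 * failed devices are deprioritized via p.idx, and the best of the remaining
 * candidates is chosen by ptr_better(). Returns 1 and fills in *pick if a
 * usable pointer was found.
 */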
613 static int extent_pick_read_device(struct bch_fs *c,
614                                    struct bkey_s_c_extent e,
615                                    struct bch_io_failures *failed,
616                                    struct extent_ptr_decoded *pick)
617 {
618         const union bch_extent_entry *entry;
619         struct extent_ptr_decoded p;
620         struct bch_dev_io_failures *f;
621         struct bch_dev *ca;
622         int ret = 0;
623
624         extent_for_each_ptr_decode(e, p, entry) {
625                 ca = bch_dev_bkey_exists(c, p.ptr.dev);
626
627                 if (p.ptr.cached && ptr_stale(ca, &p.ptr))
628                         continue;
629
630                 f = failed ? dev_io_failures(failed, p.ptr.dev) : NULL;
631                 if (f)
632                         p.idx = f->nr_failed < f->nr_retries
633                                 ? f->idx
634                                 : f->idx + 1;
635
636                 if (!p.idx &&
637                     !bch2_dev_is_readable(ca))
638                         p.idx++;
639
640                 if (force_reconstruct_read(c) &&
641                     !p.idx && p.ec_nr)
642                         p.idx++;
643
644                 if (p.idx >= p.ec_nr + 1)
645                         continue;
646
647                 if (ret && !ptr_better(c, p, *pick))
648                         continue;
649
650                 *pick = p;
651                 ret = 1;
652         }
653
654         return ret;
655 }
656
657 /* Btree ptrs */
658
659 const char *bch2_btree_ptr_invalid(const struct bch_fs *c, struct bkey_s_c k)
660 {
661         if (bkey_extent_is_cached(k.k))
662                 return "cached";
663
664         if (k.k->size)
665                 return "nonzero key size";
666
667         if (bkey_val_u64s(k.k) > BKEY_BTREE_PTR_VAL_U64s_MAX)
668                 return "value too big";
669
670         switch (k.k->type) {
671         case BCH_EXTENT: {
672                 struct bkey_s_c_extent e = bkey_s_c_to_extent(k);
673                 const union bch_extent_entry *entry;
674                 const struct bch_extent_ptr *ptr;
675                 const char *reason;
676
677                 extent_for_each_entry(e, entry) {
678                         if (__extent_entry_type(entry) >= BCH_EXTENT_ENTRY_MAX)
679                                 return "invalid extent entry type";
680
681                         if (!extent_entry_is_ptr(entry))
682                                 return "has non ptr field";
683                 }
684
685                 extent_for_each_ptr(e, ptr) {
686                         reason = extent_ptr_invalid(c, e, ptr,
687                                                     c->opts.btree_node_size,
688                                                     true);
689                         if (reason)
690                                 return reason;
691                 }
692
693                 return NULL;
694         }
695
696         default:
697                 return "invalid value type";
698         }
699 }
700
701 void bch2_btree_ptr_debugcheck(struct bch_fs *c, struct btree *b,
702                                struct bkey_s_c k)
703 {
704         struct bkey_s_c_extent e = bkey_s_c_to_extent(k);
705         const struct bch_extent_ptr *ptr;
706         unsigned seq;
707         const char *err;
708         char buf[160];
709         struct bucket_mark mark;
710         struct bch_dev *ca;
711         unsigned replicas = 0;
712         bool bad;
713
714         extent_for_each_ptr(e, ptr) {
715                 ca = bch_dev_bkey_exists(c, ptr->dev);
716                 replicas++;
717
718                 if (!test_bit(BCH_FS_ALLOC_READ_DONE, &c->flags))
719                         continue;
720
721                 err = "stale";
722                 if (ptr_stale(ca, ptr))
723                         goto err;
724
725                 do {
726                         seq = read_seqcount_begin(&c->gc_pos_lock);
727                         mark = ptr_bucket_mark(ca, ptr);
728
729                         bad = gc_pos_cmp(c->gc_pos, gc_pos_btree_node(b)) > 0 &&
730                                 (mark.data_type != BCH_DATA_BTREE ||
731                                  mark.dirty_sectors < c->opts.btree_node_size);
732                 } while (read_seqcount_retry(&c->gc_pos_lock, seq));
733
734                 err = "inconsistent";
735                 if (bad)
736                         goto err;
737         }
738
739         if (!test_bit(BCH_FS_REBUILD_REPLICAS, &c->flags) &&
740             !bch2_bkey_replicas_marked(c, btree_node_type(b),
741                                        e.s_c, false)) {
742                 bch2_bkey_val_to_text(&PBUF(buf), c, btree_node_type(b), k);
743                 bch2_fs_bug(c,
744                         "btree key bad (replicas not marked in superblock):\n%s",
745                         buf);
746                 return;
747         }
748
749         return;
750 err:
751         bch2_bkey_val_to_text(&PBUF(buf), c, btree_node_type(b), k);
752         bch2_fs_bug(c, "%s btree pointer %s: bucket %zi gen %i mark %08x",
753                     err, buf, PTR_BUCKET_NR(ca, ptr),
754                     mark.gen, (unsigned) mark.v.counter);
755 }
756
757 void bch2_btree_ptr_to_text(struct printbuf *out, struct bch_fs *c,
758                             struct bkey_s_c k)
759 {
760         const char *invalid;
761
762         if (bkey_extent_is_data(k.k))
763                 extent_print_ptrs(out, c, bkey_s_c_to_extent(k));
764
765         invalid = bch2_btree_ptr_invalid(c, k);
766         if (invalid)
767                 pr_buf(out, " invalid: %s", invalid);
768 }
769
770 int bch2_btree_pick_ptr(struct bch_fs *c, const struct btree *b,
771                         struct bch_io_failures *failed,
772                         struct extent_ptr_decoded *pick)
773 {
774         return extent_pick_read_device(c, bkey_i_to_s_c_extent(&b->key),
775                                        failed, pick);
776 }
777
778 /* Extents */
779
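/*
 * Trim the front of an extent to start at @where: the size shrinks and
 * pointer/crc offsets are adjusted so the remaining key still refers to the
 * same data on disk. Returns false if there was nothing to trim.
 */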
780 static bool __bch2_cut_front(struct bpos where, struct bkey_s k)
781 {
782         u64 len = 0;
783
784         if (bkey_cmp(where, bkey_start_pos(k.k)) <= 0)
785                 return false;
786
787         EBUG_ON(bkey_cmp(where, k.k->p) > 0);
788
789         len = k.k->p.offset - where.offset;
790
791         BUG_ON(len > k.k->size);
792
793         /*
794          * Don't readjust offset if the key size is now 0, because that could
795          * cause offset to point to the next bucket:
796          */
797         if (!len)
798                 k.k->type = KEY_TYPE_DELETED;
799         else if (bkey_extent_is_data(k.k)) {
800                 struct bkey_s_extent e = bkey_s_to_extent(k);
801                 union bch_extent_entry *entry;
802                 bool seen_crc = false;
803
804                 extent_for_each_entry(e, entry) {
805                         switch (extent_entry_type(entry)) {
806                         case BCH_EXTENT_ENTRY_ptr:
807                                 if (!seen_crc)
808                                         entry->ptr.offset += e.k->size - len;
809                                 break;
810                         case BCH_EXTENT_ENTRY_crc32:
811                                 entry->crc32.offset += e.k->size - len;
812                                 break;
813                         case BCH_EXTENT_ENTRY_crc64:
814                                 entry->crc64.offset += e.k->size - len;
815                                 break;
816                         case BCH_EXTENT_ENTRY_crc128:
817                                 entry->crc128.offset += e.k->size - len;
818                                 break;
819                         case BCH_EXTENT_ENTRY_stripe_ptr:
820                                 break;
821                         }
822
823                         if (extent_entry_is_crc(entry))
824                                 seen_crc = true;
825                 }
826         }
827
828         k.k->size = len;
829
830         return true;
831 }
832
833 bool bch2_cut_front(struct bpos where, struct bkey_i *k)
834 {
835         return __bch2_cut_front(where, bkey_i_to_s(k));
836 }
837
838 bool bch2_cut_back(struct bpos where, struct bkey *k)
839 {
840         u64 len = 0;
841
842         if (bkey_cmp(where, k->p) >= 0)
843                 return false;
844
845         EBUG_ON(bkey_cmp(where, bkey_start_pos(k)) < 0);
846
847         len = where.offset - bkey_start_offset(k);
848
849         BUG_ON(len > k->size);
850
851         k->p = where;
852         k->size = len;
853
854         if (!len)
855                 k->type = KEY_TYPE_DELETED;
856
857         return true;
858 }
859
860 /**
861  * bch2_key_resize - adjust size of @k
862  *
863  * bkey_start_offset(k) will be preserved; this modifies where the extent ends
864  */
865 void bch2_key_resize(struct bkey *k,
866                     unsigned new_size)
867 {
868         k->p.offset -= k->size;
869         k->p.offset += new_size;
870         k->size = new_size;
871 }
872
873 /*
874  * In extent_sort_fix_overlapping(), insert_fixup_extent(),
875  * extent_merge_inline() - we're modifying keys in place that are packed. To do
876  * that we have to unpack the key, modify the unpacked key - then this
877  * copies/repacks the unpacked to the original as necessary.
878  */
879 static void extent_save(struct btree *b, struct bkey_packed *dst,
880                         struct bkey *src)
881 {
882         struct bkey_format *f = &b->format;
883         struct bkey_i *dst_unpacked;
884
885         if ((dst_unpacked = packed_to_bkey(dst)))
886                 dst_unpacked->k = *src;
887         else
888                 BUG_ON(!bch2_bkey_pack_key(dst, src, f));
889 }
890
891 static bool extent_i_save(struct btree *b, struct bkey_packed *dst,
892                           struct bkey_i *src)
893 {
894         struct bkey_format *f = &b->format;
895         struct bkey_i *dst_unpacked;
896         struct bkey_packed tmp;
897
898         if ((dst_unpacked = packed_to_bkey(dst)))
899                 dst_unpacked->k = src->k;
900         else if (bch2_bkey_pack_key(&tmp, &src->k, f))
901                 memcpy_u64s(dst, &tmp, f->key_u64s);
902         else
903                 return false;
904
905         memcpy_u64s(bkeyp_val(f, dst), &src->v, bkey_val_u64s(&src->k));
906         return true;
907 }
908
909 /*
910  * If keys compare equal, compare by pointer order:
911  *
912  * Necessary for sort_fix_overlapping() - if there are multiple keys that
913  * compare equal in different sets, we have to process them newest to oldest.
914  */
915 #define extent_sort_cmp(h, l, r)                                        \
916 ({                                                                      \
917         struct bkey _ul = bkey_unpack_key(b,                            \
918                                 __btree_node_offset_to_key(b, (l).k));  \
919         struct bkey _ur = bkey_unpack_key(b,                            \
920                                 __btree_node_offset_to_key(b, (r).k));  \
921                                                                         \
922         bkey_cmp(bkey_start_pos(&_ul),                                  \
923                  bkey_start_pos(&_ur)) ?: (r).k - (l).k;                \
924 })
925
926 static inline void extent_sort_sift(struct btree_node_iter_large *iter,
927                                     struct btree *b, size_t i)
928 {
929         heap_sift_down(iter, i, extent_sort_cmp, NULL);
930 }
931
932 static inline void extent_sort_next(struct btree_node_iter_large *iter,
933                                     struct btree *b,
934                                     struct btree_node_iter_set *i)
935 {
936         sort_key_next(iter, b, i);
937         heap_sift_down(iter, i - iter->data, extent_sort_cmp, NULL);
938 }
939
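/*
 * Emit @k into the output bset, merging it into the previously emitted key
 * when possible; whiteouts are dropped entirely.
 */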
940 static void extent_sort_append(struct bch_fs *c,
941                                struct btree *b,
942                                struct btree_nr_keys *nr,
943                                struct bkey_packed *start,
944                                struct bkey_packed **prev,
945                                struct bkey_packed *k)
946 {
947         struct bkey_format *f = &b->format;
948         BKEY_PADDED(k) tmp;
949
950         if (bkey_whiteout(k))
951                 return;
952
953         bch2_bkey_unpack(b, &tmp.k, k);
954
955         if (*prev &&
956             bch2_extent_merge(c, b, (void *) *prev, &tmp.k))
957                 return;
958
959         if (*prev) {
960                 bch2_bkey_pack(*prev, (void *) *prev, f);
961
962                 btree_keys_account_key_add(nr, 0, *prev);
963                 *prev = bkey_next(*prev);
964         } else {
965                 *prev = start;
966         }
967
968         bkey_copy(*prev, &tmp.k);
969 }
970
971 struct btree_nr_keys bch2_extent_sort_fix_overlapping(struct bch_fs *c,
972                                         struct bset *dst,
973                                         struct btree *b,
974                                         struct btree_node_iter_large *iter)
975 {
976         struct bkey_format *f = &b->format;
977         struct btree_node_iter_set *_l = iter->data, *_r;
978         struct bkey_packed *prev = NULL, *out, *lk, *rk;
979         struct bkey l_unpacked, r_unpacked;
980         struct bkey_s l, r;
981         struct btree_nr_keys nr;
982
983         memset(&nr, 0, sizeof(nr));
984
985         heap_resort(iter, extent_sort_cmp, NULL);
986
987         while (!bch2_btree_node_iter_large_end(iter)) {
988                 lk = __btree_node_offset_to_key(b, _l->k);
989
990                 if (iter->used == 1) {
991                         extent_sort_append(c, b, &nr, dst->start, &prev, lk);
992                         extent_sort_next(iter, b, _l);
993                         continue;
994                 }
995
996                 _r = iter->data + 1;
997                 if (iter->used > 2 &&
998                     extent_sort_cmp(iter, _r[0], _r[1]) >= 0)
999                         _r++;
1000
1001                 rk = __btree_node_offset_to_key(b, _r->k);
1002
1003                 l = __bkey_disassemble(b, lk, &l_unpacked);
1004                 r = __bkey_disassemble(b, rk, &r_unpacked);
1005
1006                 /* If current key and next key don't overlap, just append */
1007                 if (bkey_cmp(l.k->p, bkey_start_pos(r.k)) <= 0) {
1008                         extent_sort_append(c, b, &nr, dst->start, &prev, lk);
1009                         extent_sort_next(iter, b, _l);
1010                         continue;
1011                 }
1012
1013                 /* Skip 0 size keys */
1014                 if (!r.k->size) {
1015                         extent_sort_next(iter, b, _r);
1016                         continue;
1017                 }
1018
1019                 /*
1020                  * overlap: keep the newer key and trim the older key so they
1021                  * don't overlap. comparing pointers tells us which one is
1022                  * newer, since the bsets are appended one after the other.
1023                  */
1024
1025                 /* can't happen because of comparison func */
1026                 BUG_ON(_l->k < _r->k &&
1027                        !bkey_cmp(bkey_start_pos(l.k), bkey_start_pos(r.k)));
1028
1029                 if (_l->k > _r->k) {
1030                         /* l wins, trim r */
1031                         if (bkey_cmp(l.k->p, r.k->p) >= 0) {
1032                                 sort_key_next(iter, b, _r);
1033                         } else {
1034                                 __bch2_cut_front(l.k->p, r);
1035                                 extent_save(b, rk, r.k);
1036                         }
1037
1038                         extent_sort_sift(iter, b, _r - iter->data);
1039                 } else if (bkey_cmp(l.k->p, r.k->p) > 0) {
1040                         BKEY_PADDED(k) tmp;
1041
1042                         /*
1043                          * r wins, but it overlaps in the middle of l - split l:
1044                          */
1045                         bkey_reassemble(&tmp.k, l.s_c);
1046                         bch2_cut_back(bkey_start_pos(r.k), &tmp.k.k);
1047
1048                         __bch2_cut_front(r.k->p, l);
1049                         extent_save(b, lk, l.k);
1050
1051                         extent_sort_sift(iter, b, 0);
1052
1053                         extent_sort_append(c, b, &nr, dst->start, &prev,
1054                                            bkey_to_packed(&tmp.k));
1055                 } else {
1056                         bch2_cut_back(bkey_start_pos(r.k), l.k);
1057                         extent_save(b, lk, l.k);
1058                 }
1059         }
1060
1061         if (prev) {
1062                 bch2_bkey_pack(prev, (void *) prev, f);
1063                 btree_keys_account_key_add(&nr, 0, prev);
1064                 out = bkey_next(prev);
1065         } else {
1066                 out = dst->start;
1067         }
1068
1069         dst->u64s = cpu_to_le16((u64 *) out - dst->_data);
1070         return nr;
1071 }
1072
1073 struct extent_insert_state {
1074         struct btree_insert             *trans;
1075         struct btree_insert_entry       *insert;
1076         struct bpos                     committed;
1077
1078         /* for deleting: */
1079         struct bkey_i                   whiteout;
1080         bool                            update_journal;
1081         bool                            update_btree;
1082         bool                            deleting;
1083 };
1084
1085 static bool bch2_extent_merge_inline(struct bch_fs *,
1086                                      struct btree_iter *,
1087                                      struct bkey_packed *,
1088                                      struct bkey_packed *,
1089                                      bool);
1090
1091 static void verify_extent_nonoverlapping(struct btree *b,
1092                                          struct btree_node_iter *_iter,
1093                                          struct bkey_i *insert)
1094 {
1095 #ifdef CONFIG_BCACHEFS_DEBUG
1096         struct btree_node_iter iter;
1097         struct bkey_packed *k;
1098         struct bkey uk;
1099
1100         iter = *_iter;
1101         k = bch2_btree_node_iter_prev_filter(&iter, b, KEY_TYPE_DISCARD);
1102         BUG_ON(k &&
1103                (uk = bkey_unpack_key(b, k),
1104                 bkey_cmp(uk.p, bkey_start_pos(&insert->k)) > 0));
1105
1106         iter = *_iter;
1107         k = bch2_btree_node_iter_peek_filter(&iter, b, KEY_TYPE_DISCARD);
1108 #if 0
1109         BUG_ON(k &&
1110                (uk = bkey_unpack_key(b, k),
1111                 bkey_cmp(insert->k.p, bkey_start_pos(&uk))) > 0);
1112 #else
1113         if (k &&
1114             (uk = bkey_unpack_key(b, k),
1115              bkey_cmp(insert->k.p, bkey_start_pos(&uk))) > 0) {
1116                 char buf1[100];
1117                 char buf2[100];
1118
1119                 bch2_bkey_to_text(&PBUF(buf1), &insert->k);
1120                 bch2_bkey_to_text(&PBUF(buf2), &uk);
1121
1122                 bch2_dump_btree_node(b);
1123                 panic("insert > next :\n"
1124                       "insert %s\n"
1125                       "next   %s\n",
1126                       buf1, buf2);
1127         }
1128 #endif
1129
1130 #endif
1131 }
1132
1133 static void verify_modified_extent(struct btree_iter *iter,
1134                                    struct bkey_packed *k)
1135 {
1136         bch2_btree_iter_verify(iter, iter->l[0].b);
1137         bch2_verify_insert_pos(iter->l[0].b, k, k, k->u64s);
1138 }
1139
1140 static void extent_bset_insert(struct bch_fs *c, struct btree_iter *iter,
1141                                struct bkey_i *insert)
1142 {
1143         struct btree_iter_level *l = &iter->l[0];
1144         struct btree_node_iter node_iter;
1145         struct bkey_packed *k;
1146
1147         BUG_ON(insert->k.u64s > bch_btree_keys_u64s_remaining(c, l->b));
1148
1149         EBUG_ON(bkey_deleted(&insert->k) || !insert->k.size);
1150         verify_extent_nonoverlapping(l->b, &l->iter, insert);
1151
1152         node_iter = l->iter;
1153         k = bch2_btree_node_iter_prev_filter(&node_iter, l->b, KEY_TYPE_DISCARD);
1154         if (k && !bkey_written(l->b, k) &&
1155             bch2_extent_merge_inline(c, iter, k, bkey_to_packed(insert), true))
1156                 return;
1157
1158         node_iter = l->iter;
1159         k = bch2_btree_node_iter_peek_filter(&node_iter, l->b, KEY_TYPE_DISCARD);
1160         if (k && !bkey_written(l->b, k) &&
1161             bch2_extent_merge_inline(c, iter, bkey_to_packed(insert), k, false))
1162                 return;
1163
1164         k = bch2_btree_node_iter_bset_pos(&l->iter, l->b, bset_tree_last(l->b));
1165
1166         bch2_bset_insert(l->b, &l->iter, k, insert, 0);
1167         bch2_btree_node_iter_fix(iter, l->b, &l->iter, k, 0, k->u64s);
1168         bch2_btree_iter_verify(iter, l->b);
1169 }
1170
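/*
 * Insert and/or journal the portion of the insert key up to s->committed,
 * then trim that portion off the front of the key being inserted.
 */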
1171 static void extent_insert_committed(struct extent_insert_state *s)
1172 {
1173         struct bch_fs *c = s->trans->c;
1174         struct btree_iter *iter = s->insert->iter;
1175         struct bkey_i *insert = s->insert->k;
1176         BKEY_PADDED(k) split;
1177
1178         EBUG_ON(bkey_cmp(insert->k.p, s->committed) < 0);
1179         EBUG_ON(bkey_cmp(s->committed, bkey_start_pos(&insert->k)) < 0);
1180
1181         bkey_copy(&split.k, insert);
1182         if (s->deleting)
1183                 split.k.k.type = KEY_TYPE_DISCARD;
1184
1185         bch2_cut_back(s->committed, &split.k.k);
1186
1187         if (!bkey_cmp(s->committed, iter->pos))
1188                 return;
1189
1190         bch2_btree_iter_set_pos_same_leaf(iter, s->committed);
1191
1192         if (s->update_btree) {
1193                 if (debug_check_bkeys(c))
1194                         bch2_bkey_debugcheck(c, iter->l[0].b,
1195                                              bkey_i_to_s_c(&split.k));
1196
1197                 EBUG_ON(bkey_deleted(&split.k.k) || !split.k.k.size);
1198
1199                 extent_bset_insert(c, iter, &split.k);
1200         }
1201
1202         if (s->update_journal) {
1203                 bkey_copy(&split.k, !s->deleting ? insert : &s->whiteout);
1204                 if (s->deleting)
1205                         split.k.k.type = KEY_TYPE_DISCARD;
1206
1207                 bch2_cut_back(s->committed, &split.k.k);
1208
1209                 EBUG_ON(bkey_deleted(&split.k.k) || !split.k.k.size);
1210
1211                 bch2_btree_journal_key(s->trans, iter, &split.k);
1212         }
1213
1214         bch2_cut_front(s->committed, insert);
1215
1216         insert->k.needs_whiteout        = false;
1217 }
1218
1219 void bch2_extent_trim_atomic(struct bkey_i *k, struct btree_iter *iter)
1220 {
1221         struct btree *b = iter->l[0].b;
1222
1223         BUG_ON(iter->uptodate > BTREE_ITER_NEED_PEEK);
1224
1225         bch2_cut_back(b->key.k.p, &k->k);
1226
1227         BUG_ON(bkey_cmp(bkey_start_pos(&k->k), b->data->min_key) < 0);
1228 }
1229
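/*
 * Check whether the insert can proceed: account for the worst case growth in
 * key count (an extra whiteout, splitting an existing extent) and, when
 * splitting a compressed extent, take a disk reservation for its compressed
 * sectors.
 */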
1230 enum btree_insert_ret
1231 bch2_extent_can_insert(struct btree_insert *trans,
1232                        struct btree_insert_entry *insert,
1233                        unsigned *u64s)
1234 {
1235         struct btree_iter_level *l = &insert->iter->l[0];
1236         struct btree_node_iter node_iter = l->iter;
1237         enum bch_extent_overlap overlap;
1238         struct bkey_packed *_k;
1239         struct bkey unpacked;
1240         struct bkey_s_c k;
1241         int sectors;
1242
1243         BUG_ON(trans->flags & BTREE_INSERT_ATOMIC &&
1244                !bch2_extent_is_atomic(&insert->k->k, insert->iter));
1245
1246         /*
1247          * We avoid creating whiteouts whenever possible when deleting, but
1248          * those optimizations mean we may potentially insert two whiteouts
1249          * instead of one (when we overlap with the front of one extent and the
1250          * back of another):
1251          */
1252         if (bkey_whiteout(&insert->k->k))
1253                 *u64s += BKEY_U64s;
1254
1255         _k = bch2_btree_node_iter_peek_filter(&node_iter, l->b,
1256                                               KEY_TYPE_DISCARD);
1257         if (!_k)
1258                 return BTREE_INSERT_OK;
1259
1260         k = bkey_disassemble(l->b, _k, &unpacked);
1261
1262         overlap = bch2_extent_overlap(&insert->k->k, k.k);
1263
1264         /* account for having to split existing extent: */
1265         if (overlap == BCH_EXTENT_OVERLAP_MIDDLE)
1266                 *u64s += _k->u64s;
1267
1268         if (overlap == BCH_EXTENT_OVERLAP_MIDDLE &&
1269             (sectors = bch2_extent_is_compressed(k))) {
1270                 int flags = BCH_DISK_RESERVATION_BTREE_LOCKS_HELD;
1271
1272                 if (trans->flags & BTREE_INSERT_NOFAIL)
1273                         flags |= BCH_DISK_RESERVATION_NOFAIL;
1274
1275                 switch (bch2_disk_reservation_add(trans->c,
1276                                 trans->disk_res,
1277                                 sectors, flags)) {
1278                 case 0:
1279                         break;
1280                 case -ENOSPC:
1281                         return BTREE_INSERT_ENOSPC;
1282                 case -EINTR:
1283                         return BTREE_INSERT_NEED_GC_LOCK;
1284                 default:
1285                         BUG();
1286                 }
1287         }
1288
1289         return BTREE_INSERT_OK;
1290 }
1291
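/*
 * Trim or delete the existing key @k so that it no longer overlaps the key
 * being inserted, splitting it in two when @insert lands in the middle of it.
 */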
1292 static void
1293 extent_squash(struct extent_insert_state *s, struct bkey_i *insert,
1294               struct bkey_packed *_k, struct bkey_s k,
1295               enum bch_extent_overlap overlap)
1296 {
1297         struct bch_fs *c = s->trans->c;
1298         struct btree_iter *iter = s->insert->iter;
1299         struct btree_iter_level *l = &iter->l[0];
1300
1301         switch (overlap) {
1302         case BCH_EXTENT_OVERLAP_FRONT:
1303                 /* insert overlaps with start of k: */
1304                 __bch2_cut_front(insert->k.p, k);
1305                 BUG_ON(bkey_deleted(k.k));
1306                 extent_save(l->b, _k, k.k);
1307                 verify_modified_extent(iter, _k);
1308                 break;
1309
1310         case BCH_EXTENT_OVERLAP_BACK:
1311                 /* insert overlaps with end of k: */
1312                 bch2_cut_back(bkey_start_pos(&insert->k), k.k);
1313                 BUG_ON(bkey_deleted(k.k));
1314                 extent_save(l->b, _k, k.k);
1315
1316                 /*
1317                  * As the auxiliary tree is indexed by the end of the
1318                  * key and we've just changed the end, update the
1319                  * auxiliary tree.
1320                  */
1321                 bch2_bset_fix_invalidated_key(l->b, _k);
1322                 bch2_btree_node_iter_fix(iter, l->b, &l->iter,
1323                                          _k, _k->u64s, _k->u64s);
1324                 verify_modified_extent(iter, _k);
1325                 break;
1326
1327         case BCH_EXTENT_OVERLAP_ALL: {
1328                 /* The insert key completely covers k, invalidate k */
1329                 if (!bkey_whiteout(k.k))
1330                         btree_account_key_drop(l->b, _k);
1331
1332                 k.k->size = 0;
1333                 k.k->type = KEY_TYPE_DELETED;
1334
1335                 if (_k >= btree_bset_last(l->b)->start) {
1336                         unsigned u64s = _k->u64s;
1337
1338                         bch2_bset_delete(l->b, _k, _k->u64s);
1339                         bch2_btree_node_iter_fix(iter, l->b, &l->iter,
1340                                                  _k, u64s, 0);
1341                         bch2_btree_iter_verify(iter, l->b);
1342                 } else {
1343                         extent_save(l->b, _k, k.k);
1344                         bch2_btree_node_iter_fix(iter, l->b, &l->iter,
1345                                                  _k, _k->u64s, _k->u64s);
1346                         verify_modified_extent(iter, _k);
1347                 }
1348
1349                 break;
1350         }
1351         case BCH_EXTENT_OVERLAP_MIDDLE: {
1352                 BKEY_PADDED(k) split;
1353                 /*
1354                  * The insert key falls 'in the middle' of k
1355                  * The insert key splits k in 3:
1356                  * - start only in k, preserve
1357                  * - middle common section, invalidate in k
1358                  * - end only in k, preserve
1359                  *
1360                  * We update the old key to preserve the start,
1361                  * insert will be the new common section,
1362                  * we manually insert the end that we are preserving.
1363                  *
1364                  * modify k _before_ doing the insert (which will move
1365                  * what k points to)
1366                  */
1367                 bkey_reassemble(&split.k, k.s_c);
1368                 split.k.k.needs_whiteout |= bkey_written(l->b, _k);
1369
1370                 bch2_cut_back(bkey_start_pos(&insert->k), &split.k.k);
1371                 BUG_ON(bkey_deleted(&split.k.k));
1372
1373                 __bch2_cut_front(insert->k.p, k);
1374                 BUG_ON(bkey_deleted(k.k));
1375                 extent_save(l->b, _k, k.k);
1376                 verify_modified_extent(iter, _k);
1377
1378                 extent_bset_insert(c, iter, &split.k);
1379                 break;
1380         }
1381         }
1382 }
1383
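/*
 * Walk the existing extents overlapping the insert key, squashing each one
 * and advancing s->committed; the caller then inserts/journals the committed
 * portion via extent_insert_committed().
 */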
1384 static void __bch2_insert_fixup_extent(struct extent_insert_state *s)
1385 {
1386         struct btree_iter *iter = s->insert->iter;
1387         struct btree_iter_level *l = &iter->l[0];
1388         struct bkey_packed *_k;
1389         struct bkey unpacked;
1390         struct bkey_i *insert = s->insert->k;
1391
1392         while (bkey_cmp(s->committed, insert->k.p) < 0 &&
1393                (_k = bch2_btree_node_iter_peek_filter(&l->iter, l->b,
1394                                                       KEY_TYPE_DISCARD))) {
1395                 struct bkey_s k = __bkey_disassemble(l->b, _k, &unpacked);
1396                 enum bch_extent_overlap overlap = bch2_extent_overlap(&insert->k, k.k);
1397
1398                 EBUG_ON(bkey_cmp(iter->pos, k.k->p) >= 0);
1399
1400                 if (bkey_cmp(bkey_start_pos(k.k), insert->k.p) >= 0)
1401                         break;
1402
1403                 s->committed = bpos_min(s->insert->k->k.p, k.k->p);
1404
1405                 if (!bkey_whiteout(k.k))
1406                         s->update_journal = true;
1407
1408                 if (!s->update_journal) {
1409                         bch2_cut_front(s->committed, insert);
1410                         bch2_cut_front(s->committed, &s->whiteout);
1411                         bch2_btree_iter_set_pos_same_leaf(iter, s->committed);
1412                         goto next;
1413                 }
1414
1415                 /*
1416                  * When deleting, if possible just do it by switching the type
1417                  * of the key we're deleting, instead of creating and inserting
1418                  * a new whiteout:
1419                  */
1420                 if (s->deleting &&
1421                     !s->update_btree &&
1422                     !bkey_cmp(insert->k.p, k.k->p) &&
1423                     !bkey_cmp(bkey_start_pos(&insert->k), bkey_start_pos(k.k))) {
1424                         if (!bkey_whiteout(k.k)) {
1425                                 btree_account_key_drop(l->b, _k);
1426                                 _k->type = KEY_TYPE_DISCARD;
1427                                 reserve_whiteout(l->b, _k);
1428                         }
1429                         break;
1430                 }
1431
1432                 if (k.k->needs_whiteout || bkey_written(l->b, _k)) {
1433                         insert->k.needs_whiteout = true;
1434                         s->update_btree = true;
1435                 }
1436
1437                 if (s->update_btree &&
1438                     overlap == BCH_EXTENT_OVERLAP_ALL &&
1439                     bkey_whiteout(k.k) &&
1440                     k.k->needs_whiteout) {
1441                         unreserve_whiteout(l->b, _k);
1442                         _k->needs_whiteout = false;
1443                 }
1444
1445                 extent_squash(s, insert, _k, k, overlap);
1446
1447                 if (!s->update_btree)
1448                         bch2_cut_front(s->committed, insert);
1449 next:
1450                 if (overlap == BCH_EXTENT_OVERLAP_FRONT ||
1451                     overlap == BCH_EXTENT_OVERLAP_MIDDLE)
1452                         break;
1453         }
1454
1455         if (bkey_cmp(s->committed, insert->k.p) < 0)
1456                 s->committed = bpos_min(s->insert->k->k.p, l->b->key.k.p);
1457
1458         /*
1459          * We may have skipped past some deleted extents greater than the insert
1460          * key before we got to a non-deleted extent and knew we could bail out;
1461          * rewind the iterator a bit if necessary:
1462          */
1463         {
1464                 struct btree_node_iter node_iter = l->iter;
1465
1466                 while ((_k = bch2_btree_node_iter_prev_all(&node_iter, l->b)) &&
1467                        bkey_cmp_left_packed(l->b, _k, &s->committed) > 0)
1468                         l->iter = node_iter;
1469         }
1470 }
1471
1472 /**
1473  * bch2_insert_fixup_extent - insert a new extent and deal with overlaps
1474  *
1475  * this may result in not actually doing the insert, or inserting some subset
1476  * of the insert key. For cmpxchg operations this is where that logic lives.
1477  *
1478  * All subsets of @insert that need to be inserted are inserted using
1479  * bch2_btree_insert_and_journal(). If @b or @res fills up, this function
1480  * returns BTREE_INSERT_NEED_TRAVERSE, setting @iter->pos for the prefix of
1481  * @insert that actually got inserted.
1482  *
1483  * BSET INVARIANTS: this function is responsible for maintaining all the
1484  * invariants for bsets of extents in memory. Things get really hairy with
1485  * zero size extents.
1486  *
1487  * within one bset:
1488  *
1489  * bkey_start_pos(bkey_next(k)) >= k->p
1490  * or bkey_start_offset(bkey_next(k)) >= k->offset
1491  *
1492  * i.e. strict ordering, no overlapping extents.
1493  *
1494  * multiple bsets (i.e. full btree node):
1495  *
1496  * ∀ k, j
1497  *   k.size != 0 ∧ j.size != 0 →
1498  *     ¬ (k > bkey_start_pos(j) ∧ k < j)
1499  *
1500  * i.e. no two overlapping keys _of nonzero size_
1501  *
1502  * We can't realistically maintain this invariant for zero size keys because of
1503  * the key merging done in bch2_btree_insert_key() - for two mergeable keys k, j
1504  * there may be another 0 size key between them in another bset, and it will
1505  * thus overlap with the merged key.
1506  *
1507  * In addition, iter->pos indicates how much of @insert has been processed.
1508  * If iter->pos is not the same as the end of @insert, then key insertion
1509  * needs to continue/be retried.
1510  */
1511 enum btree_insert_ret
1512 bch2_insert_fixup_extent(struct btree_insert *trans,
1513                          struct btree_insert_entry *insert)
1514 {
1515         struct btree_iter *iter = insert->iter;
1516         struct btree *b         = iter->l[0].b;
1517         struct extent_insert_state s = {
1518                 .trans          = trans,
1519                 .insert         = insert,
1520                 .committed      = iter->pos,
1521
1522                 .whiteout       = *insert->k,
1523                 .update_journal = !bkey_whiteout(&insert->k->k),
1524                 .update_btree   = !bkey_whiteout(&insert->k->k),
1525                 .deleting       = bkey_whiteout(&insert->k->k),
1526         };
1527
1528         EBUG_ON(iter->level);
1529         EBUG_ON(!insert->k->k.size);
1530
1531         /*
1532          * As we process overlapping extents, we advance @iter->pos both to
1533          * signal to our caller (btree_insert_key()) how much of @insert->k has
1534          * been inserted, and also to keep @iter->pos consistent with
1535          * @insert->k and the node iterator that we're advancing:
1536          */
1537         EBUG_ON(bkey_cmp(iter->pos, bkey_start_pos(&insert->k->k)));
1538
1539         __bch2_insert_fixup_extent(&s);
1540
1541         extent_insert_committed(&s);
1542
1543         EBUG_ON(bkey_cmp(iter->pos, bkey_start_pos(&insert->k->k)));
1544         EBUG_ON(bkey_cmp(iter->pos, s.committed));
1545
1546         if (insert->k->k.size) {
1547                 /* got to the end of this leaf node */
1548                 BUG_ON(bkey_cmp(iter->pos, b->key.k.p));
1549                 return BTREE_INSERT_NEED_TRAVERSE;
1550         }
1551
1552         return BTREE_INSERT_OK;
1553 }
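
/*
 * Concrete instance of the bset invariants documented above (illustrative
 * numbers): nonzero size extents covering sectors [0, 8) and [8, 16) may
 * coexist in a node, since they only touch; extents covering [0, 8) and
 * [4, 12) may not, and the overlap over [4, 8) has to be resolved by trimming
 * or squashing one of the keys (see extent_squash()) as part of the insert
 * path above.
 */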
1554
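/*
 * bch2_extent_invalid - check that an extent key is well formed
 *
 * Returns a string describing the first problem found (value too big, zero
 * size, bad extent entry, checksum/compression/nonce inconsistencies, or a
 * bad reservation), or NULL if @k looks valid.
 */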
1555 const char *bch2_extent_invalid(const struct bch_fs *c, struct bkey_s_c k)
1556 {
1557         if (bkey_val_u64s(k.k) > BKEY_EXTENT_VAL_U64s_MAX)
1558                 return "value too big";
1559
1560         if (!k.k->size)
1561                 return "zero key size";
1562
1563         switch (k.k->type) {
1564         case BCH_EXTENT:
1565         case BCH_EXTENT_CACHED: {
1566                 struct bkey_s_c_extent e = bkey_s_c_to_extent(k);
1567                 const union bch_extent_entry *entry;
1568                 struct bch_extent_crc_unpacked crc;
1569                 const struct bch_extent_ptr *ptr;
1570                 unsigned size_ondisk = e.k->size;
1571                 const char *reason;
1572                 unsigned nonce = UINT_MAX;
1573
1574                 extent_for_each_entry(e, entry) {
1575                         if (__extent_entry_type(entry) >= BCH_EXTENT_ENTRY_MAX)
1576                                 return "invalid extent entry type";
1577
1578                         switch (extent_entry_type(entry)) {
1579                         case BCH_EXTENT_ENTRY_ptr:
1580                                 ptr = entry_to_ptr(entry);
1581
1582                                 reason = extent_ptr_invalid(c, e, &entry->ptr,
1583                                                             size_ondisk, false);
1584                                 if (reason)
1585                                         return reason;
1586                                 break;
1587                         case BCH_EXTENT_ENTRY_crc32:
1588                         case BCH_EXTENT_ENTRY_crc64:
1589                         case BCH_EXTENT_ENTRY_crc128:
1590                                 crc = bch2_extent_crc_unpack(e.k, entry_to_crc(entry));
1591
1592                                 if (crc.offset + e.k->size >
1593                                     crc.uncompressed_size)
1594                                         return "checksum offset + key size > uncompressed size";
1595
1596                                 size_ondisk = crc.compressed_size;
1597
1598                                 if (!bch2_checksum_type_valid(c, crc.csum_type))
1599                                         return "invalid checksum type";
1600
1601                                 if (crc.compression_type >= BCH_COMPRESSION_NR)
1602                                         return "invalid compression type";
1603
1604                                 if (bch2_csum_type_is_encryption(crc.csum_type)) {
1605                                         if (nonce == UINT_MAX)
1606                                                 nonce = crc.offset + crc.nonce;
1607                                         else if (nonce != crc.offset + crc.nonce)
1608                                                 return "incorrect nonce";
1609                                 }
1610                                 break;
1611                         case BCH_EXTENT_ENTRY_stripe_ptr:
1612                                 break;
1613                         }
1614                 }
1615
1616                 return NULL;
1617         }
1618
1619         case BCH_RESERVATION: {
1620                 struct bkey_s_c_reservation r = bkey_s_c_to_reservation(k);
1621
1622                 if (bkey_val_bytes(k.k) != sizeof(struct bch_reservation))
1623                         return "incorrect value size";
1624
1625                 if (!r.v->nr_replicas || r.v->nr_replicas > BCH_REPLICAS_MAX)
1626                         return "invalid nr_replicas";
1627
1628                 return NULL;
1629         }
1630
1631         default:
1632                 return "invalid value type";
1633         }
1634 }
1635
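/*
 * Debug-time sanity checks for an extent: verifies each pointer against the
 * in-memory bucket marks (stale dirty pointers, gc mark mismatches) and
 * complains via bch2_fs_bug()/bch2_fs_bug_on() if the key has too many
 * replicas or its replicas aren't marked in the superblock. Pointer checks
 * are skipped until journal replay has finished, since keys may still be
 * overwritten by replay.
 */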
1636 static void bch2_extent_debugcheck_extent(struct bch_fs *c, struct btree *b,
1637                                           struct bkey_s_c_extent e)
1638 {
1639         const struct bch_extent_ptr *ptr;
1640         struct bch_dev *ca;
1641         struct bucket_mark mark;
1642         unsigned seq, stale;
1643         char buf[160];
1644         bool bad;
1645         unsigned replicas = 0;
1646
1647         /*
1648          * XXX: we should be doing most/all of these checks at startup time,
1649          * where we check bch2_bkey_invalid() in btree_node_read_done()
1650          *
1651          * But note that we can't check for stale pointers or incorrect gc marks
1652          * until after journal replay is done (it might be an extent that's
1653          * going to get overwritten during replay)
1654          */
1655
1656         extent_for_each_ptr(e, ptr) {
1657                 ca = bch_dev_bkey_exists(c, ptr->dev);
1658                 replicas++;
1659
1660                 /*
1661                  * If journal replay hasn't finished, we might be seeing keys
1662                  * that will be overwritten by the time journal replay is done:
1663                  */
1664                 if (!test_bit(JOURNAL_REPLAY_DONE, &c->journal.flags))
1665                         continue;
1666
1667                 stale = 0;
1668
1669                 do {
1670                         seq = read_seqcount_begin(&c->gc_pos_lock);
1671                         mark = ptr_bucket_mark(ca, ptr);
1672
1673                         /* between mark and bucket gen */
1674                         smp_rmb();
1675
1676                         stale = ptr_stale(ca, ptr);
1677
1678                         bch2_fs_bug_on(stale && !ptr->cached, c,
1679                                          "stale dirty pointer");
1680
1681                         bch2_fs_bug_on(stale > 96, c,
1682                                          "key too stale: %i",
1683                                          stale);
1684
1685                         if (stale)
1686                                 break;
1687
1688                         bad = gc_pos_cmp(c->gc_pos, gc_pos_btree_node(b)) > 0 &&
1689                                 (mark.data_type != BCH_DATA_USER ||
1690                                  !(ptr->cached
1691                                    ? mark.cached_sectors
1692                                    : mark.dirty_sectors));
1693                 } while (read_seqcount_retry(&c->gc_pos_lock, seq));
1694
1695                 if (bad)
1696                         goto bad_ptr;
1697         }
1698
1699         if (replicas > BCH_REPLICAS_MAX) {
1700                 bch2_bkey_val_to_text(&PBUF(buf), c, btree_node_type(b),
1701                                       e.s_c);
1702                 bch2_fs_bug(c,
1703                         "extent key bad (too many replicas: %u): %s",
1704                         replicas, buf);
1705                 return;
1706         }
1707
1708         if (!test_bit(BCH_FS_REBUILD_REPLICAS, &c->flags) &&
1709             !bch2_bkey_replicas_marked(c, btree_node_type(b),
1710                                        e.s_c, false)) {
1711                 bch2_bkey_val_to_text(&PBUF(buf), c, btree_node_type(b),
1712                                       e.s_c);
1713                 bch2_fs_bug(c,
1714                         "extent key bad (replicas not marked in superblock):\n%s",
1715                         buf);
1716                 return;
1717         }
1718
1719         return;
1720
1721 bad_ptr:
1722         bch2_bkey_val_to_text(&PBUF(buf), c, btree_node_type(b),
1723                               e.s_c);
1724         bch2_fs_bug(c, "extent pointer bad gc mark: %s:\nbucket %zu "
1725                    "gen %i type %u", buf,
1726                    PTR_BUCKET_NR(ca, ptr), mark.gen, mark.data_type);
1727 }
1728
1729 void bch2_extent_debugcheck(struct bch_fs *c, struct btree *b, struct bkey_s_c k)
1730 {
1731         switch (k.k->type) {
1732         case BCH_EXTENT:
1733         case BCH_EXTENT_CACHED:
1734                 bch2_extent_debugcheck_extent(c, b, bkey_s_c_to_extent(k));
1735                 break;
1736         case BCH_RESERVATION:
1737                 break;
1738         default:
1739                 BUG();
1740         }
1741 }
1742
1743 void bch2_extent_to_text(struct printbuf *out, struct bch_fs *c,
1744                          struct bkey_s_c k)
1745 {
1746         const char *invalid;
1747
1748         if (bkey_extent_is_data(k.k))
1749                 extent_print_ptrs(out, c, bkey_s_c_to_extent(k));
1750
1751         invalid = bch2_extent_invalid(c, k);
1752         if (invalid)
1753                 pr_buf(out, " invalid: %s", invalid);
1754 }
1755
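/*
 * Initialize a crc entry in the smallest on-disk format that can hold the
 * unpacked crc: crc32 if the checksum fits in 4 bytes and the size/nonce fit
 * the crc32 field widths, then crc64, then crc128; BUG()s if none of them can
 * represent it.
 */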
1756 static void bch2_extent_crc_init(union bch_extent_crc *crc,
1757                                  struct bch_extent_crc_unpacked new)
1758 {
1759 #define common_fields(_crc)                                             \
1760                 .csum_type              = _crc.csum_type,               \
1761                 .compression_type       = _crc.compression_type,        \
1762                 ._compressed_size       = _crc.compressed_size - 1,     \
1763                 ._uncompressed_size     = _crc.uncompressed_size - 1,   \
1764                 .offset                 = _crc.offset
1765
1766         if (bch_crc_bytes[new.csum_type]        <= 4 &&
1767             new.uncompressed_size               <= CRC32_SIZE_MAX &&
1768             new.nonce                           <= CRC32_NONCE_MAX) {
1769                 crc->crc32 = (struct bch_extent_crc32) {
1770                         .type = 1 << BCH_EXTENT_ENTRY_crc32,
1771                         common_fields(new),
1772                         .csum                   = *((__le32 *) &new.csum.lo),
1773                 };
1774                 return;
1775         }
1776
1777         if (bch_crc_bytes[new.csum_type]        <= 10 &&
1778             new.uncompressed_size               <= CRC64_SIZE_MAX &&
1779             new.nonce                           <= CRC64_NONCE_MAX) {
1780                 crc->crc64 = (struct bch_extent_crc64) {
1781                         .type = 1 << BCH_EXTENT_ENTRY_crc64,
1782                         common_fields(new),
1783                         .nonce                  = new.nonce,
1784                         .csum_lo                = new.csum.lo,
1785                         .csum_hi                = *((__le16 *) &new.csum.hi),
1786                 };
1787                 return;
1788         }
1789
1790         if (bch_crc_bytes[new.csum_type]        <= 16 &&
1791             new.uncompressed_size               <= CRC128_SIZE_MAX &&
1792             new.nonce                           <= CRC128_NONCE_MAX) {
1793                 crc->crc128 = (struct bch_extent_crc128) {
1794                         .type = 1 << BCH_EXTENT_ENTRY_crc128,
1795                         common_fields(new),
1796                         .nonce                  = new.nonce,
1797                         .csum                   = new.csum,
1798                 };
1799                 return;
1800         }
1801 #undef common_fields
1802         BUG();
1803 }
1804
1805 void bch2_extent_crc_append(struct bkey_i_extent *e,
1806                             struct bch_extent_crc_unpacked new)
1807 {
1808         bch2_extent_crc_init((void *) extent_entry_last(extent_i_to_s(e)), new);
1809         __extent_entry_push(e);
1810 }
1811
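/*
 * Insert @new into @e's value at position @dst: existing entries from @dst to
 * the end are shifted up by the size of @new, and the key's u64 count is
 * grown to match.
 */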
1812 static inline void __extent_entry_insert(struct bkey_i_extent *e,
1813                                          union bch_extent_entry *dst,
1814                                          union bch_extent_entry *new)
1815 {
1816         union bch_extent_entry *end = extent_entry_last(extent_i_to_s(e));
1817
1818         memmove_u64s_up((u64 *) dst + extent_entry_u64s(new),
1819                         dst, (u64 *) end - (u64 *) dst);
1820         e->k.u64s += extent_entry_u64s(new);
1821         memcpy_u64s_small(dst, new, extent_entry_u64s(new));
1822 }
1823
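/*
 * Append a decoded pointer to @e: if p->crc matches an existing crc entry (or
 * the null crc) the pointer is inserted relative to it; otherwise a new crc
 * entry is appended first. Any stripe pointers in p->ec[] are inserted at the
 * same position.
 */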
1824 void bch2_extent_ptr_decoded_append(struct bkey_i_extent *e,
1825                                     struct extent_ptr_decoded *p)
1826 {
1827         struct bch_extent_crc_unpacked crc = bch2_extent_crc_unpack(&e->k, NULL);
1828         union bch_extent_entry *pos;
1829         unsigned i;
1830
1831         if (!bch2_crc_unpacked_cmp(crc, p->crc)) {
1832                 pos = e->v.start;
1833                 goto found;
1834         }
1835
1836         extent_for_each_crc(extent_i_to_s(e), crc, pos)
1837                 if (!bch2_crc_unpacked_cmp(crc, p->crc)) {
1838                         pos = extent_entry_next(pos);
1839                         goto found;
1840                 }
1841
1842         bch2_extent_crc_append(e, p->crc);
1843         pos = extent_entry_last(extent_i_to_s(e));
1844 found:
1845         p->ptr.type = 1 << BCH_EXTENT_ENTRY_ptr;
1846         __extent_entry_insert(e, pos, to_entry(&p->ptr));
1847
1848         for (i = 0; i < p->ec_nr; i++) {
1849                 p->ec[i].type = 1 << BCH_EXTENT_ENTRY_stripe_ptr;
1850                 __extent_entry_insert(e, pos, to_entry(&p->ec[i]));
1851         }
1852 }
1853
1854 /*
1855  * bch2_extent_normalize - clean up an extent, dropping stale pointers etc.
1856  *
1857  * Returns true if @k should be dropped entirely
1858  *
1859  * For existing keys, only called when btree nodes are being rewritten, not when
1860  * they're merely being compacted/resorted in memory.
1861  */
1862 bool bch2_extent_normalize(struct bch_fs *c, struct bkey_s k)
1863 {
1864         struct bkey_s_extent e;
1865
1866         switch (k.k->type) {
1867         case KEY_TYPE_ERROR:
1868                 return false;
1869
1870         case KEY_TYPE_DELETED:
1871                 return true;
1872         case KEY_TYPE_DISCARD:
1873                 return bversion_zero(k.k->version);
1874         case KEY_TYPE_COOKIE:
1875                 return false;
1876
1877         case BCH_EXTENT:
1878         case BCH_EXTENT_CACHED:
1879                 e = bkey_s_to_extent(k);
1880
1881                 bch2_extent_drop_stale(c, e);
1882
1883                 if (!bkey_val_u64s(e.k)) {
1884                         if (bkey_extent_is_cached(e.k)) {
1885                                 k.k->type = KEY_TYPE_DISCARD;
1886                                 if (bversion_zero(k.k->version))
1887                                         return true;
1888                         } else {
1889                                 k.k->type = KEY_TYPE_ERROR;
1890                         }
1891                 }
1892
1893                 return false;
1894         case BCH_RESERVATION:
1895                 return false;
1896         default:
1897                 BUG();
1898         }
1899 }
1900
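/*
 * Mark pointers as cached until the extent's durability is reduced to
 * @nr_desired_replicas: pointers to devices outside @target are demoted
 * first; if the extent is still over-durable after that, remaining pointers
 * are considered regardless of target.
 */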
1901 void bch2_extent_mark_replicas_cached(struct bch_fs *c,
1902                                       struct bkey_s_extent e,
1903                                       unsigned target,
1904                                       unsigned nr_desired_replicas)
1905 {
1906         union bch_extent_entry *entry;
1907         struct extent_ptr_decoded p;
1908         int extra = bch2_extent_durability(c, e.c) - nr_desired_replicas;
1909
1910         if (target && extra > 0)
1911                 extent_for_each_ptr_decode(e, p, entry) {
1912                         int n = bch2_extent_ptr_durability(c, p);
1913
1914                         if (n && n <= extra &&
1915                             !bch2_dev_in_target(c, p.ptr.dev, target)) {
1916                                 entry->ptr.cached = true;
1917                                 extra -= n;
1918                         }
1919                 }
1920
1921         if (extra > 0)
1922                 extent_for_each_ptr_decode(e, p, entry) {
1923                         int n = bch2_extent_ptr_durability(c, p);
1924
1925                         if (n && n <= extra) {
1926                                 entry->ptr.cached = true;
1927                                 extra -= n;
1928                         }
1929                 }
1930 }
1931
1932 /*
1933  * This picks a non-stale pointer, preferring devices that haven't already seen
1934  * a failed read (per @failed, which may be NULL). If there are no non-stale
1935  * pointers on other devices, it will still pick a pointer from a failed one.
1936  */
1937 int bch2_extent_pick_ptr(struct bch_fs *c, struct bkey_s_c k,
1938                          struct bch_io_failures *failed,
1939                          struct extent_ptr_decoded *pick)
1940 {
1941         int ret;
1942
1943         switch (k.k->type) {
1944         case KEY_TYPE_ERROR:
1945                 return -EIO;
1946
1947         case BCH_EXTENT:
1948         case BCH_EXTENT_CACHED:
1949                 ret = extent_pick_read_device(c, bkey_s_c_to_extent(k),
1950                                               failed, pick);
1951
1952                 if (!ret && !bkey_extent_is_cached(k.k))
1953                         ret = -EIO;
1954
1955                 return ret;
1956
1957         default:
1958                 return 0;
1959         }
1960 }
1961
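/*
 * Try to merge @r into @l: the keys must have the same type, value size and
 * version, and @l must end exactly where @r starts. For extents, every pointer
 * must continue contiguously on the same device and generation without
 * straddling a bucket boundary. Returns BCH_MERGE_MERGE on success,
 * BCH_MERGE_PARTIAL if the combined size would exceed KEY_SIZE_MAX (in which
 * case @l is grown to the maximum and @r is trimmed), or BCH_MERGE_NOMERGE.
 */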
1962 enum merge_result bch2_extent_merge(struct bch_fs *c, struct btree *b,
1963                                     struct bkey_i *l, struct bkey_i *r)
1964 {
1965         struct bkey_s_extent el, er;
1966         union bch_extent_entry *en_l, *en_r;
1967
1968         if (key_merging_disabled(c))
1969                 return BCH_MERGE_NOMERGE;
1970
1971         /*
1972          * Generic header checks
1973          * Assumes left and right are in order
1974          * Left and right must be exactly aligned
1975          */
1976
1977         if (l->k.u64s           != r->k.u64s ||
1978             l->k.type           != r->k.type ||
1979             bversion_cmp(l->k.version, r->k.version) ||
1980             bkey_cmp(l->k.p, bkey_start_pos(&r->k)))
1981                 return BCH_MERGE_NOMERGE;
1982
1983         switch (l->k.type) {
1984         case KEY_TYPE_DISCARD:
1985         case KEY_TYPE_ERROR:
1986                 /* These types are mergeable, and no val to check */
1987                 break;
1988
1989         case BCH_EXTENT:
1990         case BCH_EXTENT_CACHED:
1991                 el = bkey_i_to_s_extent(l);
1992                 er = bkey_i_to_s_extent(r);
1993
1994                 extent_for_each_entry(el, en_l) {
1995                         struct bch_extent_ptr *lp, *rp;
1996                         struct bch_dev *ca;
1997
1998                         en_r = vstruct_idx(er.v, (u64 *) en_l - el.v->_data);
1999
2000                         if ((extent_entry_type(en_l) !=
2001                              extent_entry_type(en_r)) ||
2002                             !extent_entry_is_ptr(en_l))
2003                                 return BCH_MERGE_NOMERGE;
2004
2005                         lp = &en_l->ptr;
2006                         rp = &en_r->ptr;
2007
2008                         if (lp->offset + el.k->size     != rp->offset ||
2009                             lp->dev                     != rp->dev ||
2010                             lp->gen                     != rp->gen)
2011                                 return BCH_MERGE_NOMERGE;
2012
2013                         /* We don't allow extents to straddle buckets: */
2014                         ca = bch_dev_bkey_exists(c, lp->dev);
2015
2016                         if (PTR_BUCKET_NR(ca, lp) != PTR_BUCKET_NR(ca, rp))
2017                                 return BCH_MERGE_NOMERGE;
2018                 }
2019
2020                 break;
2021         case BCH_RESERVATION: {
2022                 struct bkey_i_reservation *li = bkey_i_to_reservation(l);
2023                 struct bkey_i_reservation *ri = bkey_i_to_reservation(r);
2024
2025                 if (li->v.generation != ri->v.generation ||
2026                     li->v.nr_replicas != ri->v.nr_replicas)
2027                         return BCH_MERGE_NOMERGE;
2028                 break;
2029         }
2030         default:
2031                 return BCH_MERGE_NOMERGE;
2032         }
2033
2034         l->k.needs_whiteout |= r->k.needs_whiteout;
2035
2036         /* Keys with no pointers aren't restricted to one bucket and could
2037          * overflow KEY_SIZE_MAX
2038          */
2039         if ((u64) l->k.size + r->k.size > KEY_SIZE_MAX) {
2040                 bch2_key_resize(&l->k, KEY_SIZE_MAX);
2041                 bch2_cut_front(l->k.p, r);
2042                 return BCH_MERGE_PARTIAL;
2043         }
2044
2045         bch2_key_resize(&l->k, l->k.size + r->k.size);
2046
2047         return BCH_MERGE_MERGE;
2048 }
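
/*
 * Illustrative example (made-up numbers): an extent covering sectors [0, 8)
 * with a pointer at offset 100 on device 0 merges with an extent covering
 * [8, 16) whose pointer is at offset 108 on the same device and gen, yielding
 * a single extent covering [0, 16) whose pointer still starts at offset 100,
 * provided both pointers fall within the same bucket.
 */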
2049
2050 /*
2051  * When merging an extent that we're inserting into a btree node, the new merged
2052  * extent could overlap with an existing 0 size extent - if we don't fix that,
2053  * it'll break the btree node iterator so this code finds those 0 size extents
2054  * and shifts them out of the way.
2055  *
2056  * Also unpacks and repacks.
2057  */
2058 static bool bch2_extent_merge_inline(struct bch_fs *c,
2059                                      struct btree_iter *iter,
2060                                      struct bkey_packed *l,
2061                                      struct bkey_packed *r,
2062                                      bool back_merge)
2063 {
2064         struct btree *b = iter->l[0].b;
2065         struct btree_node_iter *node_iter = &iter->l[0].iter;
2066         BKEY_PADDED(k) li, ri;
2067         struct bkey_packed *m   = back_merge ? l : r;
2068         struct bkey_i *mi       = back_merge ? &li.k : &ri.k;
2069         struct bset_tree *t     = bch2_bkey_to_bset(b, m);
2070         enum merge_result ret;
2071
2072         EBUG_ON(bkey_written(b, m));
2073
2074         /*
2075          * We need to save copies of both l and r, because we might get a
2076          * partial merge (which modifies both) and then fail to repack
2077          */
2078         bch2_bkey_unpack(b, &li.k, l);
2079         bch2_bkey_unpack(b, &ri.k, r);
2080
2081         ret = bch2_extent_merge(c, b, &li.k, &ri.k);
2082         if (ret == BCH_MERGE_NOMERGE)
2083                 return false;
2084
2085         /*
2086          * check if we overlap with deleted extents - would break the sort
2087          * order:
2088          */
2089         if (back_merge) {
2090                 struct bkey_packed *n = bkey_next(m);
2091
2092                 if (n != btree_bkey_last(b, t) &&
2093                     bkey_cmp_left_packed(b, n, &li.k.k.p) <= 0 &&
2094                     bkey_deleted(n))
2095                         return false;
2096         } else if (ret == BCH_MERGE_MERGE) {
2097                 struct bkey_packed *prev = bch2_bkey_prev_all(b, t, m);
2098
2099                 if (prev &&
2100                     bkey_cmp_left_packed_byval(b, prev,
2101                                 bkey_start_pos(&li.k.k)) > 0)
2102                         return false;
2103         }
2104
2105         if (ret == BCH_MERGE_PARTIAL) {
2106                 if (!extent_i_save(b, m, mi))
2107                         return false;
2108
2109                 if (!back_merge)
2110                         bkey_copy(packed_to_bkey(l), &li.k);
2111                 else
2112                         bkey_copy(packed_to_bkey(r), &ri.k);
2113         } else {
2114                 if (!extent_i_save(b, m, &li.k))
2115                         return false;
2116         }
2117
2118         bch2_bset_fix_invalidated_key(b, m);
2119         bch2_btree_node_iter_fix(iter, b, node_iter,
2120                                  m, m->u64s, m->u64s);
2121         verify_modified_extent(iter, m);
2122
2123         return ret == BCH_MERGE_MERGE;
2124 }
2125
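/*
 * Check that every key in [pos, pos + size) is a fully allocated extent;
 * returns -ENOSPC if any key in that range is not, 0 otherwise. Illustrative
 * call (made-up inode/sector numbers):
 *
 *	ret = bch2_check_range_allocated(c, POS(inum, sector), nr_sectors);
 */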
2126 int bch2_check_range_allocated(struct bch_fs *c, struct bpos pos, u64 size)
2127 {
2128         struct btree_iter iter;
2129         struct bpos end = pos;
2130         struct bkey_s_c k;
2131         int ret = 0;
2132
2133         end.offset += size;
2134
2135         for_each_btree_key(&iter, c, BTREE_ID_EXTENTS, pos,
2136                              BTREE_ITER_SLOTS, k) {
2137                 if (bkey_cmp(bkey_start_pos(k.k), end) >= 0)
2138                         break;
2139
2140                 if (!bch2_extent_is_fully_allocated(k)) {
2141                         ret = -ENOSPC;
2142                         break;
2143                 }
2144         }
2145         bch2_btree_iter_unlock(&iter);
2146
2147         return ret;
2148 }