// SPDX-License-Identifier: GPL-2.0

#include "bcachefs.h"
#include "bkey_methods.h"
#include "bkey_buf.h"
#include "btree_cache.h"
#include "btree_iter.h"
#include "btree_journal_iter.h"
#include "btree_key_cache.h"
#include "btree_locking.h"
#include "btree_update.h"
#include "debug.h"
#include "error.h"
#include "extents.h"
#include "journal.h"
#include "journal_io.h"
#include "replicas.h"
#include "snapshot.h"
#include "trace.h"

#include <linux/random.h>
#include <linux/prefetch.h>

static inline void btree_path_list_remove(struct btree_trans *, struct btree_path *);
static inline void btree_path_list_add(struct btree_trans *,
			btree_path_idx_t, btree_path_idx_t);

static inline unsigned long btree_iter_ip_allocated(struct btree_iter *iter)
{
#ifdef TRACK_PATH_ALLOCATED
	return iter->ip_allocated;
#else
	return 0;
#endif
}

static btree_path_idx_t btree_path_alloc(struct btree_trans *, btree_path_idx_t);
static void bch2_trans_srcu_lock(struct btree_trans *);

static inline int __btree_path_cmp(const struct btree_path *l,
				   enum btree_id	r_btree_id,
				   bool			r_cached,
				   struct bpos		r_pos,
				   unsigned		r_level)
{
	/*
	 * Must match lock ordering as defined by __bch2_btree_node_lock:
	 */
	return   cmp_int(l->btree_id,	r_btree_id) ?:
		 cmp_int((int) l->cached,	(int) r_cached) ?:
		 bpos_cmp(l->pos,	r_pos) ?:
		-cmp_int(l->level,	r_level);
}

static inline int btree_path_cmp(const struct btree_path *l,
				 const struct btree_path *r)
{
	return __btree_path_cmp(l, r->btree_id, r->cached, r->pos, r->level);
}

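/*
 * Return the position immediately after @p: if we're iterating over every
 * snapshot, that's simply the next bpos; otherwise skip over the snapshot
 * field and pin it back to the iterator's snapshot:
 */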
static inline struct bpos bkey_successor(struct btree_iter *iter, struct bpos p)
{
	/* Are we iterating over keys in all snapshots? */
	if (iter->flags & BTREE_ITER_all_snapshots) {
		p = bpos_successor(p);
	} else {
		p = bpos_nosnap_successor(p);
		p.snapshot = iter->snapshot;
	}

	return p;
}

static inline struct bpos bkey_predecessor(struct btree_iter *iter, struct bpos p)
{
	/* Are we iterating over keys in all snapshots? */
	if (iter->flags & BTREE_ITER_all_snapshots) {
		p = bpos_predecessor(p);
	} else {
		p = bpos_nosnap_predecessor(p);
		p.snapshot = iter->snapshot;
	}

	return p;
}

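/*
 * For extents, iter->pos is the start of the extent we're searching for, but
 * extents are indexed by where they end - so advance the search position to
 * the successor in order to find extents that overlap pos:
 */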
static inline struct bpos btree_iter_search_key(struct btree_iter *iter)
{
	struct bpos pos = iter->pos;

	if ((iter->flags & BTREE_ITER_is_extents) &&
	    !bkey_eq(pos, POS_MAX))
		pos = bkey_successor(iter, pos);
	return pos;
}

static inline bool btree_path_pos_before_node(struct btree_path *path,
					      struct btree *b)
{
	return bpos_lt(path->pos, b->data->min_key);
}

static inline bool btree_path_pos_after_node(struct btree_path *path,
					     struct btree *b)
{
	return bpos_gt(path->pos, b->key.k.p);
}

static inline bool btree_path_pos_in_node(struct btree_path *path,
					  struct btree *b)
{
	return path->btree_id == b->c.btree_id &&
		!btree_path_pos_before_node(path, b) &&
		!btree_path_pos_after_node(path, b);
}

/* Btree iterator: */

#ifdef CONFIG_BCACHEFS_DEBUG

static void bch2_btree_path_verify_cached(struct btree_trans *trans,
					  struct btree_path *path)
{
	struct bkey_cached *ck;
	bool locked = btree_node_locked(path, 0);

	if (!bch2_btree_node_relock(trans, path, 0))
		return;

	ck = (void *) path->l[0].b;
	BUG_ON(ck->key.btree_id != path->btree_id ||
	       !bkey_eq(ck->key.pos, path->pos));

	if (!locked)
		btree_node_unlock(trans, path, 0);
}

static void bch2_btree_path_verify_level(struct btree_trans *trans,
				struct btree_path *path, unsigned level)
{
	struct btree_path_level *l;
	struct btree_node_iter tmp;
	bool locked;
	struct bkey_packed *p, *k;
	struct printbuf buf1 = PRINTBUF;
	struct printbuf buf2 = PRINTBUF;
	struct printbuf buf3 = PRINTBUF;
	const char *msg;

	if (!bch2_debug_check_iterators)
		return;

	l	= &path->l[level];
	tmp	= l->iter;
	locked	= btree_node_locked(path, level);

	if (path->cached) {
		if (!level)
			bch2_btree_path_verify_cached(trans, path);
		return;
	}

	if (!btree_path_node(path, level))
		return;

	if (!bch2_btree_node_relock_notrace(trans, path, level))
		return;

	BUG_ON(!btree_path_pos_in_node(path, l->b));

	bch2_btree_node_iter_verify(&l->iter, l->b);

	/*
	 * For interior nodes, the iterator will have skipped past deleted keys:
	 */
	p = level
		? bch2_btree_node_iter_prev(&tmp, l->b)
		: bch2_btree_node_iter_prev_all(&tmp, l->b);
	k = bch2_btree_node_iter_peek_all(&l->iter, l->b);

	if (p && bkey_iter_pos_cmp(l->b, p, &path->pos) >= 0) {
		msg = "before";
		goto err;
	}

	if (k && bkey_iter_pos_cmp(l->b, k, &path->pos) < 0) {
		msg = "after";
		goto err;
	}

	if (!locked)
		btree_node_unlock(trans, path, level);
	return;
err:
	bch2_bpos_to_text(&buf1, path->pos);

	if (p) {
		struct bkey uk = bkey_unpack_key(l->b, p);

		bch2_bkey_to_text(&buf2, &uk);
	} else {
		prt_printf(&buf2, "(none)");
	}

	if (k) {
		struct bkey uk = bkey_unpack_key(l->b, k);

		bch2_bkey_to_text(&buf3, &uk);
	} else {
		prt_printf(&buf3, "(none)");
	}

	panic("path should be %s key at level %u:\n"
	      "path pos %s\n"
	      "prev key %s\n"
	      "cur  key %s\n",
	      msg, level, buf1.buf, buf2.buf, buf3.buf);
}

static void bch2_btree_path_verify(struct btree_trans *trans,
				   struct btree_path *path)
{
	struct bch_fs *c = trans->c;

	for (unsigned i = 0; i < (!path->cached ? BTREE_MAX_DEPTH : 1); i++) {
		if (!path->l[i].b) {
			BUG_ON(!path->cached &&
			       bch2_btree_id_root(c, path->btree_id)->b->c.level > i);
			break;
		}

		bch2_btree_path_verify_level(trans, path, i);
	}

	bch2_btree_path_verify_locks(path);
}

void bch2_trans_verify_paths(struct btree_trans *trans)
{
	struct btree_path *path;
	unsigned iter;

	trans_for_each_path(trans, path, iter)
		bch2_btree_path_verify(trans, path);
}

static void bch2_btree_iter_verify(struct btree_iter *iter)
{
	struct btree_trans *trans = iter->trans;

	BUG_ON(!!(iter->flags & BTREE_ITER_cached) != btree_iter_path(trans, iter)->cached);

	BUG_ON((iter->flags & BTREE_ITER_is_extents) &&
	       (iter->flags & BTREE_ITER_all_snapshots));

	BUG_ON(!(iter->flags & BTREE_ITER_snapshot_field) &&
	       (iter->flags & BTREE_ITER_all_snapshots) &&
	       !btree_type_has_snapshot_field(iter->btree_id));

	if (iter->update_path)
		bch2_btree_path_verify(trans, &trans->paths[iter->update_path]);
	bch2_btree_path_verify(trans, btree_iter_path(trans, iter));
}

static void bch2_btree_iter_verify_entry_exit(struct btree_iter *iter)
{
	BUG_ON((iter->flags & BTREE_ITER_filter_snapshots) &&
	       !iter->pos.snapshot);

	BUG_ON(!(iter->flags & BTREE_ITER_all_snapshots) &&
	       iter->pos.snapshot != iter->snapshot);

	BUG_ON(bkey_lt(iter->pos, bkey_start_pos(&iter->k)) ||
	       bkey_gt(iter->pos, iter->k.p));
}

static int bch2_btree_iter_verify_ret(struct btree_iter *iter, struct bkey_s_c k)
{
	struct btree_trans *trans = iter->trans;
	struct btree_iter copy;
	struct bkey_s_c prev;
	int ret = 0;

	if (!bch2_debug_check_iterators)
		return 0;

	if (!(iter->flags & BTREE_ITER_filter_snapshots))
		return 0;

	if (bkey_err(k) || !k.k)
		return 0;

	BUG_ON(!bch2_snapshot_is_ancestor(trans->c,
					  iter->snapshot,
					  k.k->p.snapshot));

	bch2_trans_iter_init(trans, &copy, iter->btree_id, iter->pos,
			     BTREE_ITER_nopreserve|
			     BTREE_ITER_all_snapshots);
	prev = bch2_btree_iter_prev(&copy);
	if (!prev.k)
		goto out;

	ret = bkey_err(prev);
	if (ret)
		goto out;

	if (bkey_eq(prev.k->p, k.k->p) &&
	    bch2_snapshot_is_ancestor(trans->c, iter->snapshot,
				      prev.k->p.snapshot) > 0) {
		struct printbuf buf1 = PRINTBUF, buf2 = PRINTBUF;

		bch2_bkey_to_text(&buf1, k.k);
		bch2_bkey_to_text(&buf2, prev.k);

		panic("iter snap %u\n"
		      "k    %s\n"
		      "prev %s\n",
		      iter->snapshot,
		      buf1.buf, buf2.buf);
	}
out:
	bch2_trans_iter_exit(trans, &copy);
	return ret;
}

void bch2_assert_pos_locked(struct btree_trans *trans, enum btree_id id,
			    struct bpos pos)
{
	bch2_trans_verify_not_unlocked(trans);

	struct btree_path *path;
	struct trans_for_each_path_inorder_iter iter;
	struct printbuf buf = PRINTBUF;

	btree_trans_sort_paths(trans);

	trans_for_each_path_inorder(trans, path, iter) {
		if (path->btree_id != id ||
		    !btree_node_locked(path, 0) ||
		    !path->should_be_locked)
			continue;

		if (!path->cached) {
			if (bkey_ge(pos, path->l[0].b->data->min_key) &&
			    bkey_le(pos, path->l[0].b->key.k.p))
				return;
		} else {
			if (bkey_eq(pos, path->pos))
				return;
		}
	}

	bch2_dump_trans_paths_updates(trans);
	bch2_bpos_to_text(&buf, pos);

	panic("not locked: %s %s\n", bch2_btree_id_str(id), buf.buf);
}

#else

static inline void bch2_btree_path_verify_level(struct btree_trans *trans,
						struct btree_path *path, unsigned l) {}
static inline void bch2_btree_path_verify(struct btree_trans *trans,
					  struct btree_path *path) {}
static inline void bch2_btree_iter_verify(struct btree_iter *iter) {}
static inline void bch2_btree_iter_verify_entry_exit(struct btree_iter *iter) {}
static inline int bch2_btree_iter_verify_ret(struct btree_iter *iter, struct bkey_s_c k) { return 0; }

#endif

/* Btree path: fixups after btree updates */

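/*
 * Repoint the node iterator's entry for bset @t at key @k and re-sort; if the
 * iterator had no entry for @t, push a new one:
 */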
static void btree_node_iter_set_set_pos(struct btree_node_iter *iter,
					struct btree *b,
					struct bset_tree *t,
					struct bkey_packed *k)
{
	struct btree_node_iter_set *set;

	btree_node_iter_for_each(iter, set)
		if (set->end == t->end_offset) {
			set->k = __btree_node_key_to_offset(b, k);
			bch2_btree_node_iter_sort(iter, b);
			return;
		}

	bch2_btree_node_iter_push(iter, b, k, btree_bkey_last(b, t));
}

static void __bch2_btree_path_fix_key_modified(struct btree_path *path,
					       struct btree *b,
					       struct bkey_packed *where)
{
	struct btree_path_level *l = &path->l[b->c.level];

	if (where != bch2_btree_node_iter_peek_all(&l->iter, l->b))
		return;

	if (bkey_iter_pos_cmp(l->b, where, &path->pos) < 0)
		bch2_btree_node_iter_advance(&l->iter, l->b);
}

void bch2_btree_path_fix_key_modified(struct btree_trans *trans,
				      struct btree *b,
				      struct bkey_packed *where)
{
	struct btree_path *path;
	unsigned i;

	trans_for_each_path_with_node(trans, b, path, i) {
		__bch2_btree_path_fix_key_modified(path, b, where);
		bch2_btree_path_verify_level(trans, path, b->c.level);
	}
}

static void __bch2_btree_node_iter_fix(struct btree_path *path,
				       struct btree *b,
				       struct btree_node_iter *node_iter,
				       struct bset_tree *t,
				       struct bkey_packed *where,
				       unsigned clobber_u64s,
				       unsigned new_u64s)
{
	const struct bkey_packed *end = btree_bkey_last(b, t);
	struct btree_node_iter_set *set;
	unsigned offset = __btree_node_key_to_offset(b, where);
	int shift = new_u64s - clobber_u64s;
	unsigned old_end = t->end_offset - shift;
	unsigned orig_iter_pos = node_iter->data[0].k;
	bool iter_current_key_modified =
		orig_iter_pos >= offset &&
		orig_iter_pos <= offset + clobber_u64s;

	btree_node_iter_for_each(node_iter, set)
		if (set->end == old_end)
			goto found;

	/* didn't find the bset in the iterator - might have to re-add it: */
	if (new_u64s &&
	    bkey_iter_pos_cmp(b, where, &path->pos) >= 0) {
		bch2_btree_node_iter_push(node_iter, b, where, end);
		goto fixup_done;
	} else {
		/* Iterator is after key that changed */
		return;
	}
found:
	set->end = t->end_offset;

	/* Iterator hasn't gotten to the key that changed yet: */
	if (set->k < offset)
		return;

	if (new_u64s &&
	    bkey_iter_pos_cmp(b, where, &path->pos) >= 0) {
		set->k = offset;
	} else if (set->k < offset + clobber_u64s) {
		set->k = offset + new_u64s;
		if (set->k == set->end)
			bch2_btree_node_iter_set_drop(node_iter, set);
	} else {
		/* Iterator is after key that changed */
		set->k = (int) set->k + shift;
		return;
	}

	bch2_btree_node_iter_sort(node_iter, b);
fixup_done:
	if (node_iter->data[0].k != orig_iter_pos)
		iter_current_key_modified = true;

	/*
	 * When a new key is added, and the node iterator now points to that
	 * key, the iterator might have skipped past deleted keys that should
	 * come after the key the iterator now points to. We have to rewind to
	 * before those deleted keys - otherwise
	 * bch2_btree_node_iter_prev_all() breaks:
	 */
	if (!bch2_btree_node_iter_end(node_iter) &&
	    iter_current_key_modified &&
	    b->c.level) {
		struct bkey_packed *k, *k2, *p;

		k = bch2_btree_node_iter_peek_all(node_iter, b);

		for_each_bset(b, t) {
			bool set_pos = false;

			if (node_iter->data[0].end == t->end_offset)
				continue;

			k2 = bch2_btree_node_iter_bset_pos(node_iter, b, t);

			while ((p = bch2_bkey_prev_all(b, t, k2)) &&
			       bkey_iter_cmp(b, k, p) < 0) {
				k2 = p;
				set_pos = true;
			}

			if (set_pos)
				btree_node_iter_set_set_pos(node_iter,
							    b, t, k2);
		}
	}
}

void bch2_btree_node_iter_fix(struct btree_trans *trans,
			      struct btree_path *path,
			      struct btree *b,
			      struct btree_node_iter *node_iter,
			      struct bkey_packed *where,
			      unsigned clobber_u64s,
			      unsigned new_u64s)
{
	struct bset_tree *t = bch2_bkey_to_bset_inlined(b, where);
	struct btree_path *linked;
	unsigned i;

	if (node_iter != &path->l[b->c.level].iter) {
		__bch2_btree_node_iter_fix(path, b, node_iter, t,
					   where, clobber_u64s, new_u64s);

		if (bch2_debug_check_iterators)
			bch2_btree_node_iter_verify(node_iter, b);
	}

	trans_for_each_path_with_node(trans, b, linked, i) {
		__bch2_btree_node_iter_fix(linked, b,
					   &linked->l[b->c.level].iter, t,
					   where, clobber_u64s, new_u64s);
		bch2_btree_path_verify_level(trans, linked, b->c.level);
	}
}

/* Btree path level: pointer to a particular btree node and node iter */

static inline struct bkey_s_c __btree_iter_unpack(struct bch_fs *c,
						  struct btree_path_level *l,
						  struct bkey *u,
						  struct bkey_packed *k)
{
	if (unlikely(!k)) {
		/*
		 * signal to bch2_btree_iter_peek_slot() that we're currently at
		 * a hole
		 */
		u->type = KEY_TYPE_deleted;
		return bkey_s_c_null;
	}

	return bkey_disassemble(l->b, k, u);
}

static inline struct bkey_s_c btree_path_level_peek_all(struct bch_fs *c,
							struct btree_path_level *l,
							struct bkey *u)
{
	return __btree_iter_unpack(c, l, u,
			bch2_btree_node_iter_peek_all(&l->iter, l->b));
}

static inline struct bkey_s_c btree_path_level_peek(struct btree_trans *trans,
						    struct btree_path *path,
						    struct btree_path_level *l,
						    struct bkey *u)
{
	struct bkey_s_c k = __btree_iter_unpack(trans->c, l, u,
			bch2_btree_node_iter_peek(&l->iter, l->b));

	path->pos = k.k ? k.k->p : l->b->key.k.p;
	trans->paths_sorted = false;
	bch2_btree_path_verify_level(trans, path, l - path->l);
	return k;
}

static inline struct bkey_s_c btree_path_level_prev(struct btree_trans *trans,
						    struct btree_path *path,
						    struct btree_path_level *l,
						    struct bkey *u)
{
	struct bkey_s_c k = __btree_iter_unpack(trans->c, l, u,
			bch2_btree_node_iter_prev(&l->iter, l->b));

	path->pos = k.k ? k.k->p : l->b->data->min_key;
	trans->paths_sorted = false;
	bch2_btree_path_verify_level(trans, path, l - path->l);
	return k;
}

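/*
 * Advance the node iterator past keys that compare before path->pos; give up
 * and return false after @max_advance steps (if positive), signalling the
 * caller that reinitializing the iterator is likely cheaper:
 */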
static inline bool btree_path_advance_to_pos(struct btree_path *path,
					     struct btree_path_level *l,
					     int max_advance)
{
	struct bkey_packed *k;
	int nr_advanced = 0;

	while ((k = bch2_btree_node_iter_peek_all(&l->iter, l->b)) &&
	       bkey_iter_pos_cmp(l->b, k, &path->pos) < 0) {
		if (max_advance > 0 && nr_advanced >= max_advance)
			return false;

		bch2_btree_node_iter_advance(&l->iter, l->b);
		nr_advanced++;
	}

	return true;
}

static inline void __btree_path_level_init(struct btree_path *path,
					   unsigned level)
{
	struct btree_path_level *l = &path->l[level];

	bch2_btree_node_iter_init(&l->iter, l->b, &path->pos);

	/*
	 * Iterators to interior nodes should always be pointed at the first
	 * non-whiteout:
	 */
	if (level)
		bch2_btree_node_iter_peek(&l->iter, l->b);
}

void bch2_btree_path_level_init(struct btree_trans *trans,
				struct btree_path *path,
				struct btree *b)
{
	BUG_ON(path->cached);

	EBUG_ON(!btree_path_pos_in_node(path, b));

	path->l[b->c.level].lock_seq = six_lock_seq(&b->c.lock);
	path->l[b->c.level].b = b;
	__btree_path_level_init(path, b->c.level);
}

/* Btree path: fixups after btree node updates: */

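/*
 * A transaction's updates point at the keys they're overwriting (old_k and
 * old_v point into the btree node); when a node is modified or replaced,
 * those pointers must be revalidated for every update that lands in it:
 */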
static void bch2_trans_revalidate_updates_in_node(struct btree_trans *trans, struct btree *b)
{
	struct bch_fs *c = trans->c;

	trans_for_each_update(trans, i)
		if (!i->cached &&
		    i->level	== b->c.level &&
		    i->btree_id	== b->c.btree_id &&
		    bpos_cmp(i->k->k.p, b->data->min_key) >= 0 &&
		    bpos_cmp(i->k->k.p, b->data->max_key) <= 0) {
			i->old_v = bch2_btree_path_peek_slot(trans->paths + i->path, &i->old_k).v;

			if (unlikely(trans->journal_replay_not_finished)) {
				struct bkey_i *j_k =
					bch2_journal_keys_peek_slot(c, i->btree_id, i->level,
								    i->k->k.p);

				if (j_k) {
					i->old_k = j_k->k;
					i->old_v = &j_k->v;
				}
			}
		}
}

/*
 * A btree node is being replaced - update the iterator to point to the new
 * node:
 */
void bch2_trans_node_add(struct btree_trans *trans,
			 struct btree_path *path,
			 struct btree *b)
{
	struct btree_path *prev;

	BUG_ON(!btree_path_pos_in_node(path, b));

	while ((prev = prev_btree_path(trans, path)) &&
	       btree_path_pos_in_node(prev, b))
		path = prev;

	for (;
	     path && btree_path_pos_in_node(path, b);
	     path = next_btree_path(trans, path))
		if (path->uptodate == BTREE_ITER_UPTODATE && !path->cached) {
			enum btree_node_locked_type t =
				btree_lock_want(path, b->c.level);

			if (t != BTREE_NODE_UNLOCKED) {
				btree_node_unlock(trans, path, b->c.level);
				six_lock_increment(&b->c.lock, (enum six_lock_type) t);
				mark_btree_node_locked(trans, path, b->c.level, t);
			}

			bch2_btree_path_level_init(trans, path, b);
		}

	bch2_trans_revalidate_updates_in_node(trans, b);
}

/*
 * A btree node has been modified in such a way as to invalidate iterators - fix
 * them:
 */
void bch2_trans_node_reinit_iter(struct btree_trans *trans, struct btree *b)
{
	struct btree_path *path;
	unsigned i;

	trans_for_each_path_with_node(trans, b, path, i)
		__btree_path_level_init(path, b->c.level);

	bch2_trans_revalidate_updates_in_node(trans, b);
}

/* Btree path: traverse, set_pos: */

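/*
 * Lock the root node, retrying if the root was replaced while we waited on
 * the lock. Returns 1 - not an error - if the btree is shallower than the
 * depth we want, i.e. there are no nodes at the level asked for:
 */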
static inline int btree_path_lock_root(struct btree_trans *trans,
				       struct btree_path *path,
				       unsigned depth_want,
				       unsigned long trace_ip)
{
	struct bch_fs *c = trans->c;
	struct btree *b, **rootp = &bch2_btree_id_root(c, path->btree_id)->b;
	enum six_lock_type lock_type;
	unsigned i;
	int ret;

	EBUG_ON(path->nodes_locked);

	while (1) {
		b = READ_ONCE(*rootp);
		path->level = READ_ONCE(b->c.level);

		if (unlikely(path->level < depth_want)) {
			/*
			 * the root is at a lower depth than the depth we want:
			 * got to the end of the btree, or we're walking nodes
			 * greater than some depth and there are no nodes >=
			 * that depth
			 */
			path->level = depth_want;
			for (i = path->level; i < BTREE_MAX_DEPTH; i++)
				path->l[i].b = NULL;
			return 1;
		}

		lock_type = __btree_lock_want(path, path->level);
		ret = btree_node_lock(trans, path, &b->c,
				      path->level, lock_type, trace_ip);
		if (unlikely(ret)) {
			if (bch2_err_matches(ret, BCH_ERR_lock_fail_root_changed))
				continue;
			if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
				return ret;
			BUG();
		}

		if (likely(b == READ_ONCE(*rootp) &&
			   b->c.level == path->level &&
			   !race_fault())) {
			for (i = 0; i < path->level; i++)
				path->l[i].b = ERR_PTR(-BCH_ERR_no_btree_node_lock_root);
			path->l[path->level].b = b;
			for (i = path->level + 1; i < BTREE_MAX_DEPTH; i++)
				path->l[i].b = NULL;

			mark_btree_node_locked(trans, path, path->level,
					       (enum btree_node_locked_type) lock_type);
			bch2_btree_path_level_init(trans, path, b);
			return 0;
		}

		six_unlock_type(&b->c.lock, lock_type);
	}
}

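/*
 * Prefetch the next few child nodes following the iterator's position in the
 * parent; more are prefetched before the filesystem has finished starting,
 * presumably because recovery-time traversals are mostly sequential:
 */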
noinline
static int btree_path_prefetch(struct btree_trans *trans, struct btree_path *path)
{
	struct bch_fs *c = trans->c;
	struct btree_path_level *l = path_l(path);
	struct btree_node_iter node_iter = l->iter;
	struct bkey_packed *k;
	struct bkey_buf tmp;
	unsigned nr = test_bit(BCH_FS_started, &c->flags)
		? (path->level > 1 ? 0 :  2)
		: (path->level > 1 ? 1 : 16);
	bool was_locked = btree_node_locked(path, path->level);
	int ret = 0;

	bch2_bkey_buf_init(&tmp);

	while (nr-- && !ret) {
		if (!bch2_btree_node_relock(trans, path, path->level))
			break;

		bch2_btree_node_iter_advance(&node_iter, l->b);
		k = bch2_btree_node_iter_peek(&node_iter, l->b);
		if (!k)
			break;

		bch2_bkey_buf_unpack(&tmp, c, l->b, k);
		ret = bch2_btree_node_prefetch(trans, path, tmp.k, path->btree_id,
					       path->level - 1);
	}

	if (!was_locked)
		btree_node_unlock(trans, path, path->level);

	bch2_bkey_buf_exit(&tmp, c);
	return ret;
}

static int btree_path_prefetch_j(struct btree_trans *trans, struct btree_path *path,
				 struct btree_and_journal_iter *jiter)
{
	struct bch_fs *c = trans->c;
	struct bkey_s_c k;
	struct bkey_buf tmp;
	unsigned nr = test_bit(BCH_FS_started, &c->flags)
		? (path->level > 1 ? 0 :  2)
		: (path->level > 1 ? 1 : 16);
	bool was_locked = btree_node_locked(path, path->level);
	int ret = 0;

	bch2_bkey_buf_init(&tmp);

	while (nr-- && !ret) {
		if (!bch2_btree_node_relock(trans, path, path->level))
			break;

		bch2_btree_and_journal_iter_advance(jiter);
		k = bch2_btree_and_journal_iter_peek(jiter);
		if (!k.k)
			break;

		bch2_bkey_buf_reassemble(&tmp, c, k);
		ret = bch2_btree_node_prefetch(trans, path, tmp.k, path->btree_id,
					       path->level - 1);
	}

	if (!was_locked)
		btree_node_unlock(trans, path, path->level);

	bch2_bkey_buf_exit(&tmp, c);
	return ret;
}

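/*
 * Stash the in-memory address of the child node we just looked up in the
 * parent's btree_ptr_v2 key, so that future traversals can skip the btree
 * node cache hash table lookup:
 */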
static noinline void btree_node_mem_ptr_set(struct btree_trans *trans,
					    struct btree_path *path,
					    unsigned plevel, struct btree *b)
{
	struct btree_path_level *l = &path->l[plevel];
	bool locked = btree_node_locked(path, plevel);
	struct bkey_packed *k;
	struct bch_btree_ptr_v2 *bp;

	if (!bch2_btree_node_relock(trans, path, plevel))
		return;

	k = bch2_btree_node_iter_peek_all(&l->iter, l->b);
	BUG_ON(k->type != KEY_TYPE_btree_ptr_v2);

	bp = (void *) bkeyp_val(&l->b->format, k);
	bp->mem_ptr = (unsigned long)b;

	if (!locked)
		btree_node_unlock(trans, path, plevel);
}

static noinline int btree_node_iter_and_journal_peek(struct btree_trans *trans,
						     struct btree_path *path,
						     unsigned flags,
						     struct bkey_buf *out)
{
	struct bch_fs *c = trans->c;
	struct btree_path_level *l = path_l(path);
	struct btree_and_journal_iter jiter;
	struct bkey_s_c k;
	int ret = 0;

	__bch2_btree_and_journal_iter_init_node_iter(trans, &jiter, l->b, l->iter, path->pos);

	k = bch2_btree_and_journal_iter_peek(&jiter);

	bch2_bkey_buf_reassemble(out, c, k);

	if ((flags & BTREE_ITER_prefetch) &&
	    c->opts.btree_node_prefetch)
		ret = btree_path_prefetch_j(trans, path, &jiter);

	bch2_btree_and_journal_iter_exit(&jiter);
	return ret;
}

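/*
 * Descend one level in the btree: fetch the key for the child node at the
 * current iterator position (consulting the journal during journal replay),
 * then read in and lock the child:
 */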
static __always_inline int btree_path_down(struct btree_trans *trans,
					   struct btree_path *path,
					   unsigned flags,
					   unsigned long trace_ip)
{
	struct bch_fs *c = trans->c;
	struct btree_path_level *l = path_l(path);
	struct btree *b;
	unsigned level = path->level - 1;
	enum six_lock_type lock_type = __btree_lock_want(path, level);
	struct bkey_buf tmp;
	int ret;

	EBUG_ON(!btree_node_locked(path, path->level));

	bch2_bkey_buf_init(&tmp);

	if (unlikely(trans->journal_replay_not_finished)) {
		ret = btree_node_iter_and_journal_peek(trans, path, flags, &tmp);
		if (ret)
			goto err;
	} else {
		struct bkey_packed *k = bch2_btree_node_iter_peek(&l->iter, l->b);
		if (!k) {
			struct printbuf buf = PRINTBUF;

			prt_str(&buf, "node not found at pos ");
			bch2_bpos_to_text(&buf, path->pos);
			prt_str(&buf, " within parent node ");
			bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(&l->b->key));

			bch2_fs_fatal_error(c, "%s", buf.buf);
			printbuf_exit(&buf);
			ret = -BCH_ERR_btree_need_topology_repair;
			goto err;
		}

		bch2_bkey_buf_unpack(&tmp, c, l->b, k);

		if ((flags & BTREE_ITER_prefetch) &&
		    c->opts.btree_node_prefetch) {
			ret = btree_path_prefetch(trans, path);
			if (ret)
				goto err;
		}
	}

	b = bch2_btree_node_get(trans, path, tmp.k, level, lock_type, trace_ip);
	ret = PTR_ERR_OR_ZERO(b);
	if (unlikely(ret))
		goto err;

	if (likely(!trans->journal_replay_not_finished &&
		   tmp.k->k.type == KEY_TYPE_btree_ptr_v2) &&
	    unlikely(b != btree_node_mem_ptr(tmp.k)))
		btree_node_mem_ptr_set(trans, path, level + 1, b);

	if (btree_node_read_locked(path, level + 1))
		btree_node_unlock(trans, path, level + 1);

	mark_btree_node_locked(trans, path, level,
			       (enum btree_node_locked_type) lock_type);
	path->level = level;
	bch2_btree_path_level_init(trans, path, b);

	bch2_btree_path_verify_locks(path);
err:
	bch2_bkey_buf_exit(&tmp, c);
	return ret;
}

static int bch2_btree_path_traverse_all(struct btree_trans *trans)
{
	struct bch_fs *c = trans->c;
	struct btree_path *path;
	unsigned long trace_ip = _RET_IP_;
	unsigned i;
	int ret = 0;

	if (trans->in_traverse_all)
		return -BCH_ERR_transaction_restart_in_traverse_all;

	trans->in_traverse_all = true;
retry_all:
	trans->restarted = 0;
	trans->last_restarted_ip = 0;

	trans_for_each_path(trans, path, i)
		path->should_be_locked = false;

	btree_trans_sort_paths(trans);

	bch2_trans_unlock(trans);
	cond_resched();
	trans_set_locked(trans);

	if (unlikely(trans->memory_allocation_failure)) {
		struct closure cl;

		closure_init_stack(&cl);

		do {
			ret = bch2_btree_cache_cannibalize_lock(trans, &cl);
			closure_sync(&cl);
		} while (ret);
	}

	/* Now, redo traversals in correct order: */
	i = 0;
	while (i < trans->nr_sorted) {
		btree_path_idx_t idx = trans->sorted[i];

		/*
		 * Traversing a path can cause another path to be added at about
		 * the same position:
		 */
		if (trans->paths[idx].uptodate) {
			__btree_path_get(trans, &trans->paths[idx], false);
			ret = bch2_btree_path_traverse_one(trans, idx, 0, _THIS_IP_);
			__btree_path_put(trans, &trans->paths[idx], false);

			if (bch2_err_matches(ret, BCH_ERR_transaction_restart) ||
			    bch2_err_matches(ret, ENOMEM))
				goto retry_all;
			if (ret)
				goto err;
		} else {
			i++;
		}
	}

	/*
	 * We used to assert that all paths had been traversed here
	 * (path->uptodate < BTREE_ITER_NEED_TRAVERSE); however, since
	 * path->should_be_locked is not set yet, we might have unlocked and
	 * then failed to relock a path - that's fine.
	 */
err:
	bch2_btree_cache_cannibalize_unlock(trans);

	trans->in_traverse_all = false;

	trace_and_count(c, trans_traverse_all, trans, trace_ip);
	return ret;
}

static inline bool btree_path_check_pos_in_node(struct btree_path *path,
						unsigned l, int check_pos)
{
	if (check_pos < 0 && btree_path_pos_before_node(path, path->l[l].b))
		return false;
	if (check_pos > 0 && btree_path_pos_after_node(path, path->l[l].b))
		return false;
	return true;
}

static inline bool btree_path_good_node(struct btree_trans *trans,
					struct btree_path *path,
					unsigned l, int check_pos)
{
	return is_btree_node(path, l) &&
		bch2_btree_node_relock(trans, path, l) &&
		btree_path_check_pos_in_node(path, l, check_pos);
}

static void btree_path_set_level_down(struct btree_trans *trans,
				      struct btree_path *path,
				      unsigned new_level)
{
	unsigned l;

	path->level = new_level;

	for (l = path->level + 1; l < BTREE_MAX_DEPTH; l++)
		if (btree_lock_want(path, l) == BTREE_NODE_UNLOCKED)
			btree_node_unlock(trans, path, l);

	btree_path_set_dirty(path, BTREE_ITER_NEED_TRAVERSE);
	bch2_btree_path_verify(trans, path);
}

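/*
 * Walk up the path until we find a node that's locked (or can be relocked)
 * and that contains the position we're searching for - additionally taking
 * intent locks where path->locks_want requires them. Returns the level we
 * stopped at:
 */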
static noinline unsigned __btree_path_up_until_good_node(struct btree_trans *trans,
							 struct btree_path *path,
							 int check_pos)
{
	unsigned i, l = path->level;
again:
	while (btree_path_node(path, l) &&
	       !btree_path_good_node(trans, path, l, check_pos))
		__btree_path_set_level_up(trans, path, l++);

	/* If we need intent locks, take them too: */
	for (i = l + 1;
	     i < path->locks_want && btree_path_node(path, i);
	     i++)
		if (!bch2_btree_node_relock(trans, path, i)) {
			while (l <= i)
				__btree_path_set_level_up(trans, path, l++);
			goto again;
		}

	return l;
}

static inline unsigned btree_path_up_until_good_node(struct btree_trans *trans,
						     struct btree_path *path,
						     int check_pos)
{
	return likely(btree_node_locked(path, path->level) &&
		      btree_path_check_pos_in_node(path, path->level, check_pos))
		? path->level
		: __btree_path_up_until_good_node(trans, path, check_pos);
}

/*
 * This is the main state machine for walking down the btree - walks down to a
 * specified depth
 *
 * Returns 0 on success, or a negative error code on error (e.g. failing to
 * read in a btree node).
 *
 * On error, caller (peek_node()/peek_key()) must return NULL; the error is
 * stashed in the iterator and returned from bch2_trans_exit().
 */
int bch2_btree_path_traverse_one(struct btree_trans *trans,
				 btree_path_idx_t path_idx,
				 unsigned flags,
				 unsigned long trace_ip)
{
	struct btree_path *path = &trans->paths[path_idx];
	unsigned depth_want = path->level;
	int ret = -((int) trans->restarted);

	if (unlikely(ret))
		goto out;

	if (unlikely(!trans->srcu_held))
		bch2_trans_srcu_lock(trans);

	trace_btree_path_traverse_start(trans, path);

	/*
	 * Ensure we obey path->should_be_locked: if it's set, we can't unlock
	 * and re-traverse the path without a transaction restart:
	 */
	if (path->should_be_locked) {
		ret = bch2_btree_path_relock(trans, path, trace_ip);
		goto out;
	}

	if (path->cached) {
		ret = bch2_btree_path_traverse_cached(trans, path, flags);
		goto out;
	}

	path = &trans->paths[path_idx];

	if (unlikely(path->level >= BTREE_MAX_DEPTH))
		goto out_uptodate;

	path->level = btree_path_up_until_good_node(trans, path, 0);
	unsigned max_level = path->level;

	EBUG_ON(btree_path_node(path, path->level) &&
		!btree_node_locked(path, path->level));

	/*
	 * Note: path->l[path->level].b may be temporarily NULL here - that
	 * would indicate to other code that we got to the end of the btree,
	 * here it indicates that relocking the root failed - it's critical that
	 * btree_path_lock_root() comes next and that it can't fail
	 */
	while (path->level > depth_want) {
		ret = btree_path_node(path, path->level)
			? btree_path_down(trans, path, flags, trace_ip)
			: btree_path_lock_root(trans, path, depth_want, trace_ip);
		if (unlikely(ret)) {
			if (ret == 1) {
				/*
				 * No nodes at this level - got to the end of
				 * the btree:
				 */
				ret = 0;
				goto out;
			}

			__bch2_btree_path_unlock(trans, path);
			path->level = depth_want;
			path->l[path->level].b = ERR_PTR(ret);
			goto out;
		}
	}

	if (unlikely(max_level > path->level)) {
		struct btree_path *linked;
		unsigned iter;

		trans_for_each_path_with_node(trans, path_l(path)->b, linked, iter)
			for (unsigned j = path->level + 1; j < max_level; j++)
				linked->l[j] = path->l[j];
	}

out_uptodate:
	path->uptodate = BTREE_ITER_UPTODATE;
	trace_btree_path_traverse_end(trans, path);
out:
	if (bch2_err_matches(ret, BCH_ERR_transaction_restart) != !!trans->restarted)
		panic("ret %s (%i) trans->restarted %s (%i)\n",
		      bch2_err_str(ret), ret,
		      bch2_err_str(trans->restarted), trans->restarted);
	bch2_btree_path_verify(trans, path);
	return ret;
}

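/*
 * Copy everything from @pos onwards - the fields before it (ref counts and
 * sorted_idx) belong to the destination - then bump the six lock counts for
 * every node the new path holds locked:
 */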
static inline void btree_path_copy(struct btree_trans *trans, struct btree_path *dst,
			    struct btree_path *src)
{
	unsigned i, offset = offsetof(struct btree_path, pos);

	memcpy((void *) dst + offset,
	       (void *) src + offset,
	       sizeof(struct btree_path) - offset);

	for (i = 0; i < BTREE_MAX_DEPTH; i++) {
		unsigned t = btree_node_locked_type(dst, i);

		if (t != BTREE_NODE_UNLOCKED)
			six_lock_increment(&dst->l[i].b->c.lock, t);
	}
}

static btree_path_idx_t btree_path_clone(struct btree_trans *trans, btree_path_idx_t src,
					 bool intent, unsigned long ip)
{
	btree_path_idx_t new = btree_path_alloc(trans, src);
	btree_path_copy(trans, trans->paths + new, trans->paths + src);
	__btree_path_get(trans, trans->paths + new, intent);
#ifdef TRACK_PATH_ALLOCATED
	trans->paths[new].ip_allocated = ip;
#endif
	return new;
}

__flatten
btree_path_idx_t __bch2_btree_path_make_mut(struct btree_trans *trans,
			btree_path_idx_t path, bool intent, unsigned long ip)
{
	struct btree_path *old = trans->paths + path;
	__btree_path_put(trans, trans->paths + path, intent);
	path = btree_path_clone(trans, path, intent, ip);
	trace_btree_path_clone(trans, old, trans->paths + path);
	trans->paths[path].preserve = false;
	return path;
}

btree_path_idx_t __must_check
__bch2_btree_path_set_pos(struct btree_trans *trans,
			  btree_path_idx_t path_idx, struct bpos new_pos,
			  bool intent, unsigned long ip)
{
	int cmp = bpos_cmp(new_pos, trans->paths[path_idx].pos);

	bch2_trans_verify_not_in_restart(trans);
	EBUG_ON(!trans->paths[path_idx].ref);

	trace_btree_path_set_pos(trans, trans->paths + path_idx, &new_pos);

	path_idx = bch2_btree_path_make_mut(trans, path_idx, intent, ip);

	struct btree_path *path = trans->paths + path_idx;
	path->pos		= new_pos;
	trans->paths_sorted	= false;

	if (unlikely(path->cached)) {
		btree_node_unlock(trans, path, 0);
		path->l[0].b = ERR_PTR(-BCH_ERR_no_btree_node_up);
		btree_path_set_dirty(path, BTREE_ITER_NEED_TRAVERSE);
		goto out;
	}

	unsigned level = btree_path_up_until_good_node(trans, path, cmp);

	if (btree_path_node(path, level)) {
		struct btree_path_level *l = &path->l[level];

		BUG_ON(!btree_node_locked(path, level));
		/*
		 * We might have to skip over many keys, or just a few: try
		 * advancing the node iterator, and if we have to skip over too
		 * many keys just reinit it (or if we're rewinding, since that
		 * is expensive).
		 */
		if (cmp < 0 ||
		    !btree_path_advance_to_pos(path, l, 8))
			bch2_btree_node_iter_init(&l->iter, l->b, &path->pos);

		/*
		 * Iterators to interior nodes should always be pointed at the
		 * first non-whiteout:
		 */
		if (unlikely(level))
			bch2_btree_node_iter_peek(&l->iter, l->b);
	}

	if (unlikely(level != path->level)) {
		btree_path_set_dirty(path, BTREE_ITER_NEED_TRAVERSE);
		__bch2_btree_path_unlock(trans, path);
	}
out:
	bch2_btree_path_verify(trans, path);
	return path_idx;
}

/* Btree path: main interface: */

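/*
 * Since the path list is kept sorted, a path identical to @path (same btree,
 * position and level) can only be an immediate neighbour in the sort order:
 */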
static struct btree_path *have_path_at_pos(struct btree_trans *trans, struct btree_path *path)
{
	struct btree_path *sib;

	sib = prev_btree_path(trans, path);
	if (sib && !btree_path_cmp(sib, path))
		return sib;

	sib = next_btree_path(trans, path);
	if (sib && !btree_path_cmp(sib, path))
		return sib;

	return NULL;
}

static struct btree_path *have_node_at_pos(struct btree_trans *trans, struct btree_path *path)
{
	struct btree_path *sib;

	sib = prev_btree_path(trans, path);
	if (sib && sib->level == path->level && path_l(sib)->b == path_l(path)->b)
		return sib;

	sib = next_btree_path(trans, path);
	if (sib && sib->level == path->level && path_l(sib)->b == path_l(path)->b)
		return sib;

	return NULL;
}

static inline void __bch2_path_free(struct btree_trans *trans, btree_path_idx_t path)
{
	__bch2_btree_path_unlock(trans, trans->paths + path);
	btree_path_list_remove(trans, trans->paths + path);
	__clear_bit(path, trans->paths_allocated);
}

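/*
 * Check whether a path that isn't currently locked could be relocked without
 * a transaction restart, by comparing saved lock sequence numbers against the
 * nodes' current ones:
 */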
static bool bch2_btree_path_can_relock(struct btree_trans *trans, struct btree_path *path)
{
	unsigned l = path->level;

	do {
		if (!btree_path_node(path, l))
			break;

		if (!is_btree_node(path, l))
			return false;

		if (path->l[l].lock_seq != path->l[l].b->c.lock.seq)
			return false;

		l++;
	} while (l < path->locks_want);

	return true;
}

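/*
 * Drop a reference on a path; when the last reference goes, free the path
 * unless it's flagged for preservation with no duplicate to take over - if a
 * duplicate exists, hand the preserve/should_be_locked flags over to it:
 */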
void bch2_path_put(struct btree_trans *trans, btree_path_idx_t path_idx, bool intent)
{
	struct btree_path *path = trans->paths + path_idx, *dup;

	if (!__btree_path_put(trans, path, intent))
		return;

	dup = path->preserve
		? have_path_at_pos(trans, path)
		: have_node_at_pos(trans, path);

	trace_btree_path_free(trans, path_idx, dup);

	if (!dup && !(!path->preserve && !is_btree_node(path, path->level)))
		return;

	if (path->should_be_locked && !trans->restarted) {
		if (!dup)
			return;

		if (!(trans->locked
		      ? bch2_btree_path_relock_norestart(trans, dup)
		      : bch2_btree_path_can_relock(trans, dup)))
			return;
	}

	if (dup) {
		dup->preserve		|= path->preserve;
		dup->should_be_locked	|= path->should_be_locked;
	}

	__bch2_path_free(trans, path_idx);
}

static void bch2_path_put_nokeep(struct btree_trans *trans, btree_path_idx_t path,
				 bool intent)
{
	if (!__btree_path_put(trans, trans->paths + path, intent))
		return;

	__bch2_path_free(trans, path);
}

void __noreturn bch2_trans_restart_error(struct btree_trans *trans, u32 restart_count)
{
	panic("trans->restart_count %u, should be %u, last restarted by %pS\n",
	      trans->restart_count, restart_count,
	      (void *) trans->last_begin_ip);
}

void __noreturn bch2_trans_in_restart_error(struct btree_trans *trans)
{
	panic("in transaction restart: %s, last restarted by %pS\n",
	      bch2_err_str(trans->restarted),
	      (void *) trans->last_restarted_ip);
}

void __noreturn bch2_trans_unlocked_error(struct btree_trans *trans)
{
	panic("trans should be locked, unlocked by %pS\n",
	      (void *) trans->last_unlock_ip);
}

noinline __cold
void bch2_trans_updates_to_text(struct printbuf *buf, struct btree_trans *trans)
{
	prt_printf(buf, "%u transaction updates for %s journal seq %llu\n",
		   trans->nr_updates, trans->fn, trans->journal_res.seq);
	printbuf_indent_add(buf, 2);

	trans_for_each_update(trans, i) {
		struct bkey_s_c old = { &i->old_k, i->old_v };

		prt_printf(buf, "update: btree=%s cached=%u %pS\n",
		       bch2_btree_id_str(i->btree_id),
		       i->cached,
		       (void *) i->ip_allocated);

		prt_printf(buf, "  old ");
		bch2_bkey_val_to_text(buf, trans->c, old);
		prt_newline(buf);

		prt_printf(buf, "  new ");
		bch2_bkey_val_to_text(buf, trans->c, bkey_i_to_s_c(i->k));
		prt_newline(buf);
	}

	for (struct jset_entry *e = trans->journal_entries;
	     e != btree_trans_journal_entries_top(trans);
	     e = vstruct_next(e))
		bch2_journal_entry_to_text(buf, trans->c, e);

	printbuf_indent_sub(buf, 2);
}

noinline __cold
void bch2_dump_trans_updates(struct btree_trans *trans)
{
	struct printbuf buf = PRINTBUF;

	bch2_trans_updates_to_text(&buf, trans);
	bch2_print_str(trans->c, buf.buf);
	printbuf_exit(&buf);
}

static void bch2_btree_path_to_text_short(struct printbuf *out, struct btree_trans *trans, btree_path_idx_t path_idx)
{
	struct btree_path *path = trans->paths + path_idx;

	prt_printf(out, "path: idx %3u ref %u:%u %c %c %c btree=%s l=%u pos ",
		   path_idx, path->ref, path->intent_ref,
		   path->preserve ? 'P' : ' ',
		   path->should_be_locked ? 'S' : ' ',
		   path->cached ? 'C' : 'B',
		   bch2_btree_id_str(path->btree_id),
		   path->level);
	bch2_bpos_to_text(out, path->pos);

	if (!path->cached && btree_node_locked(path, path->level)) {
		prt_char(out, ' ');
		struct btree *b = path_l(path)->b;
		bch2_bpos_to_text(out, b->data->min_key);
		prt_char(out, '-');
		bch2_bpos_to_text(out, b->key.k.p);
	}

#ifdef TRACK_PATH_ALLOCATED
	prt_printf(out, " %pS", (void *) path->ip_allocated);
#endif
}

static const char *btree_node_locked_str(enum btree_node_locked_type t)
{
	switch (t) {
	case BTREE_NODE_UNLOCKED:
		return "unlocked";
	case BTREE_NODE_READ_LOCKED:
		return "read";
	case BTREE_NODE_INTENT_LOCKED:
		return "intent";
	case BTREE_NODE_WRITE_LOCKED:
		return "write";
	default:
		return NULL;
	}
}

void bch2_btree_path_to_text(struct printbuf *out, struct btree_trans *trans, btree_path_idx_t path_idx)
{
	bch2_btree_path_to_text_short(out, trans, path_idx);

	struct btree_path *path = trans->paths + path_idx;

	prt_printf(out, " uptodate %u locks_want %u", path->uptodate, path->locks_want);
	prt_newline(out);

	printbuf_indent_add(out, 2);
	for (unsigned l = 0; l < BTREE_MAX_DEPTH; l++) {
		prt_printf(out, "l=%u locks %s seq %u node ", l,
			   btree_node_locked_str(btree_node_locked_type(path, l)),
			   path->l[l].lock_seq);

		int ret = PTR_ERR_OR_ZERO(path->l[l].b);
		if (ret)
			prt_str(out, bch2_err_str(ret));
		else
			prt_printf(out, "%px", path->l[l].b);
		prt_newline(out);
	}
	printbuf_indent_sub(out, 2);
}

static noinline __cold
void __bch2_trans_paths_to_text(struct printbuf *out, struct btree_trans *trans,
				bool nosort)
{
	struct trans_for_each_path_inorder_iter iter;

	if (!nosort)
		btree_trans_sort_paths(trans);

	trans_for_each_path_idx_inorder(trans, iter) {
1549                 bch2_btree_path_to_text_short(out, trans, iter.path_idx);
1550                 prt_newline(out);
1551         }
1552 }
1553
1554 noinline __cold
1555 void bch2_trans_paths_to_text(struct printbuf *out, struct btree_trans *trans)
1556 {
1557         __bch2_trans_paths_to_text(out, trans, false);
1558 }
1559
1560 static noinline __cold
1561 void __bch2_dump_trans_paths_updates(struct btree_trans *trans, bool nosort)
1562 {
1563         struct printbuf buf = PRINTBUF;
1564
1565         __bch2_trans_paths_to_text(&buf, trans, nosort);
1566         bch2_trans_updates_to_text(&buf, trans);
1567
1568         bch2_print_str(trans->c, buf.buf);
1569         printbuf_exit(&buf);
1570 }
1571
1572 noinline __cold
1573 void bch2_dump_trans_paths_updates(struct btree_trans *trans)
1574 {
1575         __bch2_dump_trans_paths_updates(trans, false);
1576 }
1577
1578 noinline __cold
1579 static void bch2_trans_update_max_paths(struct btree_trans *trans)
1580 {
1581         struct btree_transaction_stats *s = btree_trans_stats(trans);
1582         struct printbuf buf = PRINTBUF;
1583         size_t nr = bitmap_weight(trans->paths_allocated, trans->nr_paths);
1584
1585         bch2_trans_paths_to_text(&buf, trans);
1586
1587         if (!buf.allocation_failure) {
1588                 mutex_lock(&s->lock);
1589                 if (nr > s->nr_max_paths) {
1590                         s->nr_max_paths = nr;
1591                         swap(s->max_paths_text, buf.buf);
1592                 }
1593                 mutex_unlock(&s->lock);
1594         }
1595
1596         printbuf_exit(&buf);
1597
1598         trans->nr_paths_max = nr;
1599 }
1600
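/*
 * Slow path of btree_trans_too_many_iters(): emit the tracepoint and event
 * count, then restart the transaction so it starts over with fewer paths.
 */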
1601 noinline __cold
1602 int __bch2_btree_trans_too_many_iters(struct btree_trans *trans)
1603 {
1604         if (trace_trans_restart_too_many_iters_enabled()) {
1605                 struct printbuf buf = PRINTBUF;
1606
1607                 bch2_trans_paths_to_text(&buf, trans);
1608                 trace_trans_restart_too_many_iters(trans, _THIS_IP_, buf.buf);
1609                 printbuf_exit(&buf);
1610         }
1611
1612         count_event(trans->c, trans_restart_too_many_iters);
1613
1614         return btree_trans_restart(trans, BCH_ERR_transaction_restart_too_many_iters);
1615 }
1616
1617 static noinline void btree_path_overflow(struct btree_trans *trans)
1618 {
1619         bch2_dump_trans_paths_updates(trans);
1620         bch_err(trans->c, "trans path overflow");
1621 }
1622
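/*
 * The paths bitmap, path array, sorted-index array and updates array live in
 * a single allocation; double its size and copy the old contents over. The
 * extra 8 bytes after the sorted array appear to be slack for the
 * u64-granularity memmoves in btree_path_list_add()/_remove().
 */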
1623 static noinline void btree_paths_realloc(struct btree_trans *trans)
1624 {
1625         unsigned nr = trans->nr_paths * 2;
1626
1627         void *p = kvzalloc(BITS_TO_LONGS(nr) * sizeof(unsigned long) +
1628                           sizeof(struct btree_trans_paths) +
1629                           nr * sizeof(struct btree_path) +
1630                           nr * sizeof(btree_path_idx_t) + 8 +
1631                           nr * sizeof(struct btree_insert_entry), GFP_KERNEL|__GFP_NOFAIL);
1632
1633         unsigned long *paths_allocated = p;
1634         memcpy(paths_allocated, trans->paths_allocated, BITS_TO_LONGS(trans->nr_paths) * sizeof(unsigned long));
1635         p += BITS_TO_LONGS(nr) * sizeof(unsigned long);
1636
1637         p += sizeof(struct btree_trans_paths);
1638         struct btree_path *paths = p;
1639         *trans_paths_nr(paths) = nr;
1640         memcpy(paths, trans->paths, trans->nr_paths * sizeof(struct btree_path));
1641         p += nr * sizeof(struct btree_path);
1642
1643         btree_path_idx_t *sorted = p;
1644         memcpy(sorted, trans->sorted, trans->nr_sorted * sizeof(btree_path_idx_t));
1645         p += nr * sizeof(btree_path_idx_t) + 8;
1646
1647         struct btree_insert_entry *updates = p;
1648         memcpy(updates, trans->updates, trans->nr_paths * sizeof(struct btree_insert_entry));
1649
1650         unsigned long *old = trans->paths_allocated;
1651
1652         rcu_assign_pointer(trans->paths_allocated,      paths_allocated);
1653         rcu_assign_pointer(trans->paths,                paths);
1654         rcu_assign_pointer(trans->sorted,               sorted);
1655         rcu_assign_pointer(trans->updates,              updates);
1656
1657         trans->nr_paths         = nr;
1658
1659         if (old != trans->_paths_allocated)
1660                 kfree_rcu_mightsleep(old);
1661 }
1662
1663 static inline btree_path_idx_t btree_path_alloc(struct btree_trans *trans,
1664                                                 btree_path_idx_t pos)
1665 {
1666         btree_path_idx_t idx = find_first_zero_bit(trans->paths_allocated, trans->nr_paths);
1667
1668         if (unlikely(idx == trans->nr_paths)) {
1669                 if (trans->nr_paths == BTREE_ITER_MAX) {
1670                         btree_path_overflow(trans);
1671                         return 0;
1672                 }
1673
1674                 btree_paths_realloc(trans);
1675         }
1676
1677         /*
1678          * Do this before marking the new path as allocated, since it won't be
1679          * initialized yet:
1680          */
1681         if (unlikely(idx > trans->nr_paths_max))
1682                 bch2_trans_update_max_paths(trans);
1683
1684         __set_bit(idx, trans->paths_allocated);
1685
1686         struct btree_path *path = &trans->paths[idx];
1687         path->ref               = 0;
1688         path->intent_ref        = 0;
1689         path->nodes_locked      = 0;
1690
1691         btree_path_list_add(trans, pos, idx);
1692         trans->paths_sorted = false;
1693         return idx;
1694 }
1695
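/*
 * Get a path positioned at @pos in @btree_id: reuse an existing path with
 * matching btree_id/cached/level if possible (taking a ref and repositioning
 * it), otherwise allocate and initialize a new one.
 */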
1696 btree_path_idx_t bch2_path_get(struct btree_trans *trans,
1697                              enum btree_id btree_id, struct bpos pos,
1698                              unsigned locks_want, unsigned level,
1699                              unsigned flags, unsigned long ip)
1700 {
1701         struct btree_path *path;
1702         bool cached = flags & BTREE_ITER_cached;
1703         bool intent = flags & BTREE_ITER_intent;
1704         struct trans_for_each_path_inorder_iter iter;
1705         btree_path_idx_t path_pos = 0, path_idx;
1706
1707         bch2_trans_verify_not_unlocked(trans);
1708         bch2_trans_verify_not_in_restart(trans);
1709         bch2_trans_verify_locks(trans);
1710
1711         btree_trans_sort_paths(trans);
1712
1713         trans_for_each_path_inorder(trans, path, iter) {
1714                 if (__btree_path_cmp(path,
1715                                      btree_id,
1716                                      cached,
1717                                      pos,
1718                                      level) > 0)
1719                         break;
1720
1721                 path_pos = iter.path_idx;
1722         }
1723
1724         if (path_pos &&
1725             trans->paths[path_pos].cached       == cached &&
1726             trans->paths[path_pos].btree_id     == btree_id &&
1727             trans->paths[path_pos].level        == level) {
1728                 trace_btree_path_get(trans, trans->paths + path_pos, &pos);
1729
1730                 __btree_path_get(trans, trans->paths + path_pos, intent);
1731                 path_idx = bch2_btree_path_set_pos(trans, path_pos, pos, intent, ip);
1732                 path = trans->paths + path_idx;
1733         } else {
1734                 path_idx = btree_path_alloc(trans, path_pos);
1735                 path = trans->paths + path_idx;
1736
1737                 __btree_path_get(trans, path, intent);
1738                 path->pos                       = pos;
1739                 path->btree_id                  = btree_id;
1740                 path->cached                    = cached;
1741                 path->uptodate                  = BTREE_ITER_NEED_TRAVERSE;
1742                 path->should_be_locked          = false;
1743                 path->level                     = level;
1744                 path->locks_want                = locks_want;
1745                 path->nodes_locked              = 0;
1746                 for (unsigned i = 0; i < ARRAY_SIZE(path->l); i++)
1747                         path->l[i].b            = ERR_PTR(-BCH_ERR_no_btree_node_init);
1748 #ifdef TRACK_PATH_ALLOCATED
1749                 path->ip_allocated              = ip;
1750 #endif
1751                 trans->paths_sorted             = false;
1752
1753                 trace_btree_path_alloc(trans, path);
1754         }
1755
1756         if (!(flags & BTREE_ITER_nopreserve))
1757                 path->preserve = true;
1758
1759         if (path->intent_ref)
1760                 locks_want = max(locks_want, level + 1);
1761
1762         /*
1763          * If the path already has locks_want greater than requested, we don't
1764          * downgrade it here: on a restart caused by a btree node split needing
1765          * to upgrade locks, we might be putting/getting the iterator again.
1766          * Downgrading iterators only happens via bch2_trans_downgrade(), after
1767          * a successful transaction commit.
1768          */
1769
1770         locks_want = min(locks_want, BTREE_MAX_DEPTH);
1771         if (locks_want > path->locks_want)
1772                 bch2_btree_path_upgrade_noupgrade_sibs(trans, path, locks_want, NULL);
1773
1774         return path_idx;
1775 }
1776
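/*
 * Get a private (unshared) path that's left unlocked - for callers that want
 * to position a path without affecting other users of it:
 */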
1777 btree_path_idx_t bch2_path_get_unlocked_mut(struct btree_trans *trans,
1778                                             enum btree_id btree_id,
1779                                             unsigned level,
1780                                             struct bpos pos)
1781 {
1782         btree_path_idx_t path_idx = bch2_path_get(trans, btree_id, pos, level + 1, level,
1783                              BTREE_ITER_nopreserve|
1784                              BTREE_ITER_intent, _RET_IP_);
1785         path_idx = bch2_btree_path_make_mut(trans, path_idx, true, _RET_IP_);
1786
1787         struct btree_path *path = trans->paths + path_idx;
1788         bch2_btree_path_downgrade(trans, path);
1789         __bch2_btree_path_unlock(trans, path);
1790         return path_idx;
1791 }
1792
1793 struct bkey_s_c bch2_btree_path_peek_slot(struct btree_path *path, struct bkey *u)
1794 {
1796         struct btree_path_level *l = path_l(path);
1797         struct bkey_packed *_k;
1798         struct bkey_s_c k;
1799
1800         if (unlikely(!l->b))
1801                 return bkey_s_c_null;
1802
1803         EBUG_ON(path->uptodate != BTREE_ITER_UPTODATE);
1804         EBUG_ON(!btree_node_locked(path, path->level));
1805
1806         if (!path->cached) {
1807                 _k = bch2_btree_node_iter_peek_all(&l->iter, l->b);
1808                 k = _k ? bkey_disassemble(l->b, _k, u) : bkey_s_c_null;
1809
1810                 EBUG_ON(k.k && bkey_deleted(k.k) && bpos_eq(k.k->p, path->pos));
1811
1812                 if (!k.k || !bpos_eq(path->pos, k.k->p))
1813                         goto hole;
1814         } else {
1815                 struct bkey_cached *ck = (void *) path->l[0].b;
1816                 if (!ck)
1817                         return bkey_s_c_null;
1818
1819                 EBUG_ON(path->btree_id != ck->key.btree_id ||
1820                         !bkey_eq(path->pos, ck->key.pos));
1821
1822                 *u = ck->k->k;
1823                 k = bkey_i_to_s_c(ck->k);
1824         }
1825
1826         return k;
1827 hole:
1828         bkey_init(u);
1829         u->p = path->pos;
1830         return (struct bkey_s_c) { u, NULL };
1831 }
1832
1833
1834 void bch2_set_btree_iter_dontneed(struct btree_iter *iter)
1835 {
1836         struct btree_trans *trans = iter->trans;
1837
1838         if (!iter->path || trans->restarted)
1839                 return;
1840
1841         struct btree_path *path = btree_iter_path(trans, iter);
1842         path->preserve          = false;
1843         if (path->ref == 1)
1844                 path->should_be_locked  = false;
1845 }
1846 /* Btree iterators: */
1847
1848 int __must_check
1849 __bch2_btree_iter_traverse(struct btree_iter *iter)
1850 {
1851         return bch2_btree_path_traverse(iter->trans, iter->path, iter->flags);
1852 }
1853
1854 int __must_check
1855 bch2_btree_iter_traverse(struct btree_iter *iter)
1856 {
1857         struct btree_trans *trans = iter->trans;
1858         int ret;
1859
1860         bch2_trans_verify_not_unlocked(trans);
1861
1862         iter->path = bch2_btree_path_set_pos(trans, iter->path,
1863                                         btree_iter_search_key(iter),
1864                                         iter->flags & BTREE_ITER_intent,
1865                                         btree_iter_ip_allocated(iter));
1866
1867         ret = bch2_btree_path_traverse(iter->trans, iter->path, iter->flags);
1868         if (ret)
1869                 return ret;
1870
1871         struct btree_path *path = btree_iter_path(trans, iter);
1872         if (btree_path_node(path, path->level))
1873                 btree_path_set_should_be_locked(trans, path);
1874         return 0;
1875 }
1876
1877 /* Iterate across nodes (leaf and interior nodes) */
1878
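/*
 * Returns the node containing the iterator's position at the iterator's
 * level, advancing iter->pos to the end of the node's key range; NULL at the
 * end of the btree, or an ERR_PTR on error/restart.
 */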
1879 struct btree *bch2_btree_iter_peek_node(struct btree_iter *iter)
1880 {
1881         struct btree_trans *trans = iter->trans;
1882         struct btree *b = NULL;
1883         int ret;
1884
1885         EBUG_ON(trans->paths[iter->path].cached);
1886         bch2_btree_iter_verify(iter);
1887
1888         ret = bch2_btree_path_traverse(trans, iter->path, iter->flags);
1889         if (ret)
1890                 goto err;
1891
1892         struct btree_path *path = btree_iter_path(trans, iter);
1893         b = btree_path_node(path, path->level);
1894         if (!b)
1895                 goto out;
1896
1897         BUG_ON(bpos_lt(b->key.k.p, iter->pos));
1898
1899         bkey_init(&iter->k);
1900         iter->k.p = iter->pos = b->key.k.p;
1901
1902         iter->path = bch2_btree_path_set_pos(trans, iter->path, b->key.k.p,
1903                                         iter->flags & BTREE_ITER_intent,
1904                                         btree_iter_ip_allocated(iter));
1905         btree_path_set_should_be_locked(trans, btree_iter_path(trans, iter));
1906 out:
1907         bch2_btree_iter_verify_entry_exit(iter);
1908         bch2_btree_iter_verify(iter);
1909
1910         return b;
1911 err:
1912         b = ERR_PTR(ret);
1913         goto out;
1914 }
1915
1916 /* Only kept for -tools */
1917 struct btree *bch2_btree_iter_peek_node_and_restart(struct btree_iter *iter)
1918 {
1919         struct btree *b;
1920
1921         while (b = bch2_btree_iter_peek_node(iter),
1922                bch2_err_matches(PTR_ERR_OR_ZERO(b), BCH_ERR_transaction_restart))
1923                 bch2_trans_begin(iter->trans);
1924
1925         return b;
1926 }
1927
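/*
 * Advance to the next node: if the parent has more children, descend to the
 * next child at iter->min_depth; otherwise move up and return the parent.
 * Returns NULL after the root, or an ERR_PTR on error/restart.
 */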
1928 struct btree *bch2_btree_iter_next_node(struct btree_iter *iter)
1929 {
1930         struct btree_trans *trans = iter->trans;
1931         struct btree *b = NULL;
1932         int ret;
1933
1934         EBUG_ON(trans->paths[iter->path].cached);
1935         bch2_trans_verify_not_in_restart(trans);
1936         bch2_btree_iter_verify(iter);
1937
1938         ret = bch2_btree_path_traverse(trans, iter->path, iter->flags);
1939         if (ret)
1940                 goto err;
1941
1943         struct btree_path *path = btree_iter_path(trans, iter);
1944
1945         /* already at end? */
1946         if (!btree_path_node(path, path->level))
1947                 return NULL;
1948
1949         /* no parent node - we were at the root, iteration is done */
1950         if (!btree_path_node(path, path->level + 1)) {
1951                 btree_path_set_level_up(trans, path);
1952                 return NULL;
1953         }
1954
1955         if (!bch2_btree_node_relock(trans, path, path->level + 1)) {
1956                 __bch2_btree_path_unlock(trans, path);
1957                 path->l[path->level].b          = ERR_PTR(-BCH_ERR_no_btree_node_relock);
1958                 path->l[path->level + 1].b      = ERR_PTR(-BCH_ERR_no_btree_node_relock);
1959                 btree_path_set_dirty(path, BTREE_ITER_NEED_TRAVERSE);
1960                 trace_and_count(trans->c, trans_restart_relock_next_node, trans, _THIS_IP_, path);
1961                 ret = btree_trans_restart(trans, BCH_ERR_transaction_restart_relock);
1962                 goto err;
1963         }
1964
1965         b = btree_path_node(path, path->level + 1);
1966
1967         if (bpos_eq(iter->pos, b->key.k.p)) {
1968                 __btree_path_set_level_up(trans, path, path->level++);
1969         } else {
1970                 if (btree_lock_want(path, path->level + 1) == BTREE_NODE_UNLOCKED)
1971                         btree_node_unlock(trans, path, path->level + 1);
1972
1973                 /*
1974                  * Haven't gotten to the end of the parent node: go back down to
1975                  * the next child node
1976                  */
1977                 iter->path = bch2_btree_path_set_pos(trans, iter->path,
1978                                         bpos_successor(iter->pos),
1979                                         iter->flags & BTREE_ITER_intent,
1980                                         btree_iter_ip_allocated(iter));
1981
1982                 path = btree_iter_path(trans, iter);
1983                 btree_path_set_level_down(trans, path, iter->min_depth);
1984
1985                 ret = bch2_btree_path_traverse(trans, iter->path, iter->flags);
1986                 if (ret)
1987                         goto err;
1988
1989                 path = btree_iter_path(trans, iter);
1990                 b = path->l[path->level].b;
1991         }
1992
1993         bkey_init(&iter->k);
1994         iter->k.p = iter->pos = b->key.k.p;
1995
1996         iter->path = bch2_btree_path_set_pos(trans, iter->path, b->key.k.p,
1997                                         iter->flags & BTREE_ITER_intent,
1998                                         btree_iter_ip_allocated(iter));
1999         btree_path_set_should_be_locked(trans, btree_iter_path(trans, iter));
2000         EBUG_ON(btree_iter_path(trans, iter)->uptodate);
2001 out:
2002         bch2_btree_iter_verify_entry_exit(iter);
2003         bch2_btree_iter_verify(iter);
2004
2005         return b;
2006 err:
2007         b = ERR_PTR(ret);
2008         goto out;
2009 }
2010
2011 /* Iterate across keys (in leaf nodes only) */
2012
2013 inline bool bch2_btree_iter_advance(struct btree_iter *iter)
2014 {
2015         struct bpos pos = iter->k.p;
2016         bool ret = !(iter->flags & BTREE_ITER_all_snapshots
2017                      ? bpos_eq(pos, SPOS_MAX)
2018                      : bkey_eq(pos, SPOS_MAX));
2019
2020         if (ret && !(iter->flags & BTREE_ITER_is_extents))
2021                 pos = bkey_successor(iter, pos);
2022         bch2_btree_iter_set_pos(iter, pos);
2023         return ret;
2024 }
2025
2026 inline bool bch2_btree_iter_rewind(struct btree_iter *iter)
2027 {
2028         struct bpos pos = bkey_start_pos(&iter->k);
2029         bool ret = !(iter->flags & BTREE_ITER_all_snapshots
2030                      ? bpos_eq(pos, POS_MIN)
2031                      : bkey_eq(pos, POS_MIN));
2032
2033         if (ret && !(iter->flags & BTREE_ITER_is_extents))
2034                 pos = bkey_predecessor(iter, pos);
2035         bch2_btree_iter_set_pos(iter, pos);
2036         return ret;
2037 }
2038
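/*
 * The helpers below overlay this transaction's own pending updates onto keys
 * read from the btree, so that peeks within a transaction see its
 * not-yet-committed writes:
 */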
2039 static noinline
2040 void bch2_btree_trans_peek_prev_updates(struct btree_trans *trans, struct btree_iter *iter,
2041                                         struct bkey_s_c *k)
2042 {
2043         struct bpos end = path_l(btree_iter_path(trans, iter))->b->data->min_key;
2044
2045         trans_for_each_update(trans, i)
2046                 if (!i->key_cache_already_flushed &&
2047                     i->btree_id == iter->btree_id &&
2048                     bpos_le(i->k->k.p, iter->pos) &&
2049                     bpos_ge(i->k->k.p, k->k ? k->k->p : end)) {
2050                         iter->k = i->k->k;
2051                         *k = bkey_i_to_s_c(i->k);
2052                 }
2053 }
2054
2055 static noinline
2056 void bch2_btree_trans_peek_updates(struct btree_trans *trans, struct btree_iter *iter,
2057                                    struct bkey_s_c *k)
2058 {
2059         struct btree_path *path = btree_iter_path(trans, iter);
2060         struct bpos end = path_l(path)->b->key.k.p;
2061
2062         trans_for_each_update(trans, i)
2063                 if (!i->key_cache_already_flushed &&
2064                     i->btree_id == iter->btree_id &&
2065                     bpos_ge(i->k->k.p, path->pos) &&
2066                     bpos_le(i->k->k.p, k->k ? k->k->p : end)) {
2067                         iter->k = i->k->k;
2068                         *k = bkey_i_to_s_c(i->k);
2069                 }
2070 }
2071
2072 static noinline
2073 void bch2_btree_trans_peek_slot_updates(struct btree_trans *trans, struct btree_iter *iter,
2074                                         struct bkey_s_c *k)
2075 {
2076         trans_for_each_update(trans, i)
2077                 if (!i->key_cache_already_flushed &&
2078                     i->btree_id == iter->btree_id &&
2079                     bpos_eq(i->k->k.p, iter->pos)) {
2080                         iter->k = i->k->k;
2081                         *k = bkey_i_to_s_c(i->k);
2082                 }
2083 }
2084
2085 static struct bkey_i *bch2_btree_journal_peek(struct btree_trans *trans,
2086                                               struct btree_iter *iter,
2087                                               struct bpos end_pos)
2088 {
2089         struct btree_path *path = btree_iter_path(trans, iter);
2090
2091         return bch2_journal_keys_peek_upto(trans->c, iter->btree_id,
2092                                            path->level,
2093                                            path->pos,
2094                                            end_pos,
2095                                            &iter->journal_idx);
2096 }
2097
2098 static noinline
2099 struct bkey_s_c btree_trans_peek_slot_journal(struct btree_trans *trans,
2100                                               struct btree_iter *iter)
2101 {
2102         struct btree_path *path = btree_iter_path(trans, iter);
2103         struct bkey_i *k = bch2_btree_journal_peek(trans, iter, path->pos);
2104
2105         if (k) {
2106                 iter->k = k->k;
2107                 return bkey_i_to_s_c(k);
2108         } else {
2109                 return bkey_s_c_null;
2110         }
2111 }
2112
2113 static noinline
2114 struct bkey_s_c btree_trans_peek_journal(struct btree_trans *trans,
2115                                          struct btree_iter *iter,
2116                                          struct bkey_s_c k)
2117 {
2118         struct btree_path *path = btree_iter_path(trans, iter);
2119         struct bkey_i *next_journal =
2120                 bch2_btree_journal_peek(trans, iter,
2121                                 k.k ? k.k->p : path_l(path)->b->key.k.p);
2122
2123         if (next_journal) {
2124                 iter->k = next_journal->k;
2125                 k = bkey_i_to_s_c(next_journal);
2126         }
2127
2128         return k;
2129 }
2130
2131 /*
2132  * Checks the btree key cache for a key at @pos and returns it if present, or
2133  * bkey_s_c_null if not:
2134  */
2135 static noinline
2136 struct bkey_s_c btree_trans_peek_key_cache(struct btree_iter *iter, struct bpos pos)
2137 {
2138         struct btree_trans *trans = iter->trans;
2139         struct bch_fs *c = trans->c;
2140         struct bkey u;
2141         struct bkey_s_c k;
2142         int ret;
2143
2144         bch2_trans_verify_not_in_restart(trans);
2145         bch2_trans_verify_not_unlocked(trans);
2146
2147         if ((iter->flags & BTREE_ITER_key_cache_fill) &&
2148             bpos_eq(iter->pos, pos))
2149                 return bkey_s_c_null;
2150
2151         if (!bch2_btree_key_cache_find(c, iter->btree_id, pos))
2152                 return bkey_s_c_null;
2153
2154         if (!iter->key_cache_path)
2155                 iter->key_cache_path = bch2_path_get(trans, iter->btree_id, pos,
2156                                                      iter->flags & BTREE_ITER_intent, 0,
2157                                                      iter->flags|BTREE_ITER_cached|
2158                                                      BTREE_ITER_cached_nofill,
2159                                                      _THIS_IP_);
2160
2161         iter->key_cache_path = bch2_btree_path_set_pos(trans, iter->key_cache_path, pos,
2162                                         iter->flags & BTREE_ITER_intent,
2163                                         btree_iter_ip_allocated(iter));
2164
2165         ret =   bch2_btree_path_traverse(trans, iter->key_cache_path,
2166                                          iter->flags|BTREE_ITER_cached) ?:
2167                 bch2_btree_path_relock(trans, btree_iter_path(trans, iter), _THIS_IP_);
2168         if (unlikely(ret))
2169                 return bkey_s_c_err(ret);
2170
2171         btree_path_set_should_be_locked(trans, trans->paths + iter->key_cache_path);
2172
2173         k = bch2_btree_path_peek_slot(trans->paths + iter->key_cache_path, &u);
2174         if (k.k && !bkey_err(k)) {
2175                 iter->k = u;
2176                 k.k = &iter->k;
2177         }
2178         return k;
2179 }
2180
2181 static struct bkey_s_c __bch2_btree_iter_peek(struct btree_iter *iter, struct bpos search_key)
2182 {
2183         struct btree_trans *trans = iter->trans;
2184         struct bkey_s_c k, k2;
2185         int ret;
2186
2187         EBUG_ON(btree_iter_path(trans, iter)->cached);
2188         bch2_btree_iter_verify(iter);
2189
2190         while (1) {
2191                 struct btree_path_level *l;
2192
2193                 iter->path = bch2_btree_path_set_pos(trans, iter->path, search_key,
2194                                         iter->flags & BTREE_ITER_intent,
2195                                         btree_iter_ip_allocated(iter));
2196
2197                 ret = bch2_btree_path_traverse(trans, iter->path, iter->flags);
2198                 if (unlikely(ret)) {
2199                         /* ensure that iter->k is consistent with iter->pos: */
2200                         bch2_btree_iter_set_pos(iter, iter->pos);
2201                         k = bkey_s_c_err(ret);
2202                         goto out;
2203                 }
2204
2205                 struct btree_path *path = btree_iter_path(trans, iter);
2206                 l = path_l(path);
2207
2208                 if (unlikely(!l->b)) {
2209                         /* No btree nodes at requested level: */
2210                         bch2_btree_iter_set_pos(iter, SPOS_MAX);
2211                         k = bkey_s_c_null;
2212                         goto out;
2213                 }
2214
2215                 btree_path_set_should_be_locked(trans, path);
2216
2217                 k = btree_path_level_peek_all(trans->c, l, &iter->k);
2218
2219                 if (unlikely(iter->flags & BTREE_ITER_with_key_cache) &&
2220                     k.k &&
2221                     (k2 = btree_trans_peek_key_cache(iter, k.k->p)).k) {
2222                         k = k2;
2223                         ret = bkey_err(k);
2224                         if (ret) {
2225                                 bch2_btree_iter_set_pos(iter, iter->pos);
2226                                 goto out;
2227                         }
2228                 }
2229
2230                 if (unlikely(iter->flags & BTREE_ITER_with_journal))
2231                         k = btree_trans_peek_journal(trans, iter, k);
2232
2233                 if (unlikely((iter->flags & BTREE_ITER_with_updates) &&
2234                              trans->nr_updates))
2235                         bch2_btree_trans_peek_updates(trans, iter, &k);
2236
2237                 if (k.k && bkey_deleted(k.k)) {
2238                         /*
2239                          * If we've got a whiteout, and it's after the search
2240                          * key, advance the search key to the whiteout instead
2241                          * of just after the whiteout - it might be a btree
2242                          * whiteout, with a real key at the same position, since
2243                          * in the btree, deleted keys sort before non-deleted keys.
2244                          */
2245                         search_key = !bpos_eq(search_key, k.k->p)
2246                                 ? k.k->p
2247                                 : bpos_successor(k.k->p);
2248                         continue;
2249                 }
2250
2251                 if (likely(k.k)) {
2252                         break;
2253                 } else if (likely(!bpos_eq(l->b->key.k.p, SPOS_MAX))) {
2254                         /* Advance to next leaf node: */
2255                         search_key = bpos_successor(l->b->key.k.p);
2256                 } else {
2257                         /* End of btree: */
2258                         bch2_btree_iter_set_pos(iter, SPOS_MAX);
2259                         k = bkey_s_c_null;
2260                         goto out;
2261                 }
2262         }
2263 out:
2264         bch2_btree_iter_verify(iter);
2265
2266         return k;
2267 }
2268
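/*
 * Typical usage - a minimal sketch with hypothetical `inum`/`snapshot`
 * values (real callers mostly use the for_each_btree_key_upto() wrappers,
 * which also handle transaction restarts, as done by hand here):
 *
 *	struct btree_iter iter;
 *	struct bkey_s_c k;
 *	int ret;
 *
 *	bch2_trans_iter_init(trans, &iter, BTREE_ID_extents,
 *			     SPOS(inum, 0, snapshot), 0);
 *	while (1) {
 *		k = bch2_btree_iter_peek_upto(&iter, POS(inum, U64_MAX));
 *		ret = bkey_err(k);
 *		if (bch2_err_matches(ret, BCH_ERR_transaction_restart)) {
 *			bch2_trans_begin(trans);
 *			continue;
 *		}
 *		if (ret || !k.k)
 *			break;
 *
 *		... use k ...
 *
 *		bch2_btree_iter_advance(&iter);
 *	}
 *	bch2_trans_iter_exit(trans, &iter);
 */
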
2269 /**
2270  * bch2_btree_iter_peek_upto() - returns first key greater than or equal to
2271  * iterator's current position
2272  * @iter:       iterator to peek from
2273  * @end:        search limit: returns keys less than or equal to @end
2274  *
2275  * Returns:     key if found, or an error extractable with bkey_err().
2276  */
2277 struct bkey_s_c bch2_btree_iter_peek_upto(struct btree_iter *iter, struct bpos end)
2278 {
2279         struct btree_trans *trans = iter->trans;
2280         struct bpos search_key = btree_iter_search_key(iter);
2281         struct bkey_s_c k;
2282         struct bpos iter_pos;
2283         int ret;
2284
2285         bch2_trans_verify_not_unlocked(trans);
2286         EBUG_ON((iter->flags & BTREE_ITER_filter_snapshots) && bkey_eq(end, POS_MAX));
2287
2288         if (iter->update_path) {
2289                 bch2_path_put_nokeep(trans, iter->update_path,
2290                                      iter->flags & BTREE_ITER_intent);
2291                 iter->update_path = 0;
2292         }
2293
2294         bch2_btree_iter_verify_entry_exit(iter);
2295
2296         while (1) {
2297                 k = __bch2_btree_iter_peek(iter, search_key);
2298                 if (unlikely(!k.k))
2299                         goto end;
2300                 if (unlikely(bkey_err(k)))
2301                         goto out_no_locked;
2302
2303                 /*
2304                  * We need to check against @end before FILTER_SNAPSHOTS because
2305                  * if we get to a different inode than requested, we might be
2306                  * seeing keys for a different snapshot tree that will all be
2307                  * filtered out.
2308                  *
2309                  * But we can't do the full check here, because bkey_start_pos()
2310                  * isn't monotonically increasing before FILTER_SNAPSHOTS, and
2311                  * that's what we check against in extents mode:
2312                  */
2313                 if (unlikely(!(iter->flags & BTREE_ITER_is_extents)
2314                              ? bkey_gt(k.k->p, end)
2315                              : k.k->p.inode > end.inode))
2316                         goto end;
2317
2318                 if (iter->update_path &&
2319                     !bkey_eq(trans->paths[iter->update_path].pos, k.k->p)) {
2320                         bch2_path_put_nokeep(trans, iter->update_path,
2321                                              iter->flags & BTREE_ITER_intent);
2322                         iter->update_path = 0;
2323                 }
2324
2325                 if ((iter->flags & BTREE_ITER_filter_snapshots) &&
2326                     (iter->flags & BTREE_ITER_intent) &&
2327                     !(iter->flags & BTREE_ITER_is_extents) &&
2328                     !iter->update_path) {
2329                         struct bpos pos = k.k->p;
2330
2331                         if (pos.snapshot < iter->snapshot) {
2332                                 search_key = bpos_successor(k.k->p);
2333                                 continue;
2334                         }
2335
2336                         pos.snapshot = iter->snapshot;
2337
2338                         /*
2339                          * advance, same as on exit for iter->path, but only up
2340                          * to snapshot
2341                          */
2342                         __btree_path_get(trans, trans->paths + iter->path, iter->flags & BTREE_ITER_intent);
2343                         iter->update_path = iter->path;
2344
2345                         iter->update_path = bch2_btree_path_set_pos(trans,
2346                                                 iter->update_path, pos,
2347                                                 iter->flags & BTREE_ITER_intent,
2348                                                 _THIS_IP_);
2349                         ret = bch2_btree_path_traverse(trans, iter->update_path, iter->flags);
2350                         if (unlikely(ret)) {
2351                                 k = bkey_s_c_err(ret);
2352                                 goto out_no_locked;
2353                         }
2354                 }
2355
2356                 /*
2357                  * We can never have a key in a leaf node at POS_MAX, so
2358                  * these successor() calls can never overflow:
2359                  */
2360                 if ((iter->flags & BTREE_ITER_filter_snapshots) &&
2361                     !bch2_snapshot_is_ancestor(trans->c,
2362                                                iter->snapshot,
2363                                                k.k->p.snapshot)) {
2364                         search_key = bpos_successor(k.k->p);
2365                         continue;
2366                 }
2367
2368                 if (bkey_whiteout(k.k) &&
2369                     !(iter->flags & BTREE_ITER_all_snapshots)) {
2370                         search_key = bkey_successor(iter, k.k->p);
2371                         continue;
2372                 }
2373
2374                 /*
2375                  * iter->pos should be monotonically increasing, and always be
2376                  * equal to the key we just returned - except extents can
2377                  * straddle iter->pos:
2378                  */
2379                 if (!(iter->flags & BTREE_ITER_is_extents))
2380                         iter_pos = k.k->p;
2381                 else
2382                         iter_pos = bkey_max(iter->pos, bkey_start_pos(k.k));
2383
2384                 if (unlikely(!(iter->flags & BTREE_ITER_is_extents)
2385                              ? bkey_gt(iter_pos, end)
2386                              : bkey_ge(iter_pos, end)))
2387                         goto end;
2388
2389                 break;
2390         }
2391
2392         iter->pos = iter_pos;
2393
2394         iter->path = bch2_btree_path_set_pos(trans, iter->path, k.k->p,
2395                                 iter->flags & BTREE_ITER_intent,
2396                                 btree_iter_ip_allocated(iter));
2397
2398         btree_path_set_should_be_locked(trans, btree_iter_path(trans, iter));
2399 out_no_locked:
2400         if (iter->update_path) {
2401                 ret = bch2_btree_path_relock(trans, trans->paths + iter->update_path, _THIS_IP_);
2402                 if (unlikely(ret))
2403                         k = bkey_s_c_err(ret);
2404                 else
2405                         btree_path_set_should_be_locked(trans, trans->paths + iter->update_path);
2406         }
2407
2408         if (!(iter->flags & BTREE_ITER_all_snapshots))
2409                 iter->pos.snapshot = iter->snapshot;
2410
2411         ret = bch2_btree_iter_verify_ret(iter, k);
2412         if (unlikely(ret)) {
2413                 bch2_btree_iter_set_pos(iter, iter->pos);
2414                 k = bkey_s_c_err(ret);
2415         }
2416
2417         bch2_btree_iter_verify_entry_exit(iter);
2418
2419         return k;
2420 end:
2421         bch2_btree_iter_set_pos(iter, end);
2422         k = bkey_s_c_null;
2423         goto out_no_locked;
2424 }
2425
2426 /**
2427  * bch2_btree_iter_next() - returns first key greater than iterator's current
2428  * position
2429  * @iter:       iterator to peek from
2430  *
2431  * Returns:     key if found, or an error extractable with bkey_err().
2432  */
2433 struct bkey_s_c bch2_btree_iter_next(struct btree_iter *iter)
2434 {
2435         if (!bch2_btree_iter_advance(iter))
2436                 return bkey_s_c_null;
2437
2438         return bch2_btree_iter_peek(iter);
2439 }
2440
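/*
 * Reverse iteration, sketched (restart handling as in the forward example
 * above):
 *
 *	for (k = bch2_btree_iter_peek_prev(&iter);
 *	     k.k && !bkey_err(k);
 *	     k = bch2_btree_iter_prev(&iter))
 *		... use k ...
 *
 * With BTREE_ITER_filter_snapshots, only the version of a key visible in
 * iter->snapshot is returned - hence the saved-candidate path machinery
 * below.
 */
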
2441 /**
2442  * bch2_btree_iter_peek_prev() - returns first key less than or equal to
2443  * iterator's current position
2444  * @iter:       iterator to peek from
2445  *
2446  * Returns:     key if found, or an error extractable with bkey_err().
2447  */
2448 struct bkey_s_c bch2_btree_iter_peek_prev(struct btree_iter *iter)
2449 {
2450         struct btree_trans *trans = iter->trans;
2451         struct bpos search_key = iter->pos;
2452         struct bkey_s_c k;
2453         struct bkey saved_k;
2454         const struct bch_val *saved_v;
2455         btree_path_idx_t saved_path = 0;
2456         int ret;
2457
2458         bch2_trans_verify_not_unlocked(trans);
2459         EBUG_ON(btree_iter_path(trans, iter)->cached ||
2460                 btree_iter_path(trans, iter)->level);
2461
2462         if (iter->flags & BTREE_ITER_with_journal)
2463                 return bkey_s_c_err(-BCH_ERR_btree_iter_with_journal_not_supported);
2464
2465         bch2_btree_iter_verify(iter);
2466         bch2_btree_iter_verify_entry_exit(iter);
2467
2468         if (iter->flags & BTREE_ITER_filter_snapshots)
2469                 search_key.snapshot = U32_MAX;
2470
2471         while (1) {
2472                 iter->path = bch2_btree_path_set_pos(trans, iter->path, search_key,
2473                                                 iter->flags & BTREE_ITER_intent,
2474                                                 btree_iter_ip_allocated(iter));
2475
2476                 ret = bch2_btree_path_traverse(trans, iter->path, iter->flags);
2477                 if (unlikely(ret)) {
2478                         /* ensure that iter->k is consistent with iter->pos: */
2479                         bch2_btree_iter_set_pos(iter, iter->pos);
2480                         k = bkey_s_c_err(ret);
2481                         goto out_no_locked;
2482                 }
2483
2484                 struct btree_path *path = btree_iter_path(trans, iter);
2485
2486                 k = btree_path_level_peek(trans, path, &path->l[0], &iter->k);
2487                 if (!k.k ||
2488                     ((iter->flags & BTREE_ITER_is_extents)
2489                      ? bpos_ge(bkey_start_pos(k.k), search_key)
2490                      : bpos_gt(k.k->p, search_key)))
2491                         k = btree_path_level_prev(trans, path, &path->l[0], &iter->k);
2492
2493                 if (unlikely((iter->flags & BTREE_ITER_with_updates) &&
2494                              trans->nr_updates))
2495                         bch2_btree_trans_peek_prev_updates(trans, iter, &k);
2496
2497                 if (likely(k.k)) {
2498                         if (iter->flags & BTREE_ITER_filter_snapshots) {
2499                                 if (k.k->p.snapshot == iter->snapshot)
2500                                         goto got_key;
2501
2502                                 /*
2503                                  * If we have a saved candidate, and we're no
2504                                  * longer at the same _key_ (not pos), return
2505                                  * that candidate
2506                                  */
2507                                 if (saved_path && !bkey_eq(k.k->p, saved_k.p)) {
2508                                         bch2_path_put_nokeep(trans, iter->path,
2509                                                       iter->flags & BTREE_ITER_intent);
2510                                         iter->path = saved_path;
2511                                         saved_path = 0;
2512                                         iter->k = saved_k;
2513                                         k.v     = saved_v;
2514                                         goto got_key;
2515                                 }
2516
2517                                 if (bch2_snapshot_is_ancestor(trans->c,
2518                                                               iter->snapshot,
2519                                                               k.k->p.snapshot)) {
2520                                         if (saved_path)
2521                                                 bch2_path_put_nokeep(trans, saved_path,
2522                                                       iter->flags & BTREE_ITER_intent);
2523                                         saved_path = btree_path_clone(trans, iter->path,
2524                                                                 iter->flags & BTREE_ITER_intent,
2525                                                                 _THIS_IP_);
2526                                         path = btree_iter_path(trans, iter);
2527                                         trace_btree_path_save_pos(trans, path, trans->paths + saved_path);
2528                                         saved_k = *k.k;
2529                                         saved_v = k.v;
2530                                 }
2531
2532                                 search_key = bpos_predecessor(k.k->p);
2533                                 continue;
2534                         }
2535 got_key:
2536                         if (bkey_whiteout(k.k) &&
2537                             !(iter->flags & BTREE_ITER_all_snapshots)) {
2538                                 search_key = bkey_predecessor(iter, k.k->p);
2539                                 if (iter->flags & BTREE_ITER_filter_snapshots)
2540                                         search_key.snapshot = U32_MAX;
2541                                 continue;
2542                         }
2543
2544                         btree_path_set_should_be_locked(trans, path);
2545                         break;
2546                 } else if (likely(!bpos_eq(path->l[0].b->data->min_key, POS_MIN))) {
2547                         /* Advance to previous leaf node: */
2548                         search_key = bpos_predecessor(path->l[0].b->data->min_key);
2549                 } else {
2550                         /* Start of btree: */
2551                         bch2_btree_iter_set_pos(iter, POS_MIN);
2552                         k = bkey_s_c_null;
2553                         goto out_no_locked;
2554                 }
2555         }
2556
2557         EBUG_ON(bkey_gt(bkey_start_pos(k.k), iter->pos));
2558
2559         /* Extents can straddle iter->pos: */
2560         if (bkey_lt(k.k->p, iter->pos))
2561                 iter->pos = k.k->p;
2562
2563         if (iter->flags & BTREE_ITER_filter_snapshots)
2564                 iter->pos.snapshot = iter->snapshot;
2565 out_no_locked:
2566         if (saved_path)
2567                 bch2_path_put_nokeep(trans, saved_path, iter->flags & BTREE_ITER_intent);
2568
2569         bch2_btree_iter_verify_entry_exit(iter);
2570         bch2_btree_iter_verify(iter);
2571
2572         return k;
2573 }
2574
2575 /**
2576  * bch2_btree_iter_prev() - returns first key less than iterator's current
2577  * position
2578  * @iter:       iterator to peek from
2579  *
2580  * Returns:     key if found, or an error extractable with bkey_err().
2581  */
2582 struct bkey_s_c bch2_btree_iter_prev(struct btree_iter *iter)
2583 {
2584         if (!bch2_btree_iter_rewind(iter))
2585                 return bkey_s_c_null;
2586
2587         return bch2_btree_iter_peek_prev(iter);
2588 }
2589
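/*
 * Returns the key at exactly iter->pos, without advancing: if nothing is
 * present there, a synthetic KEY_TYPE_deleted key is returned - for extents,
 * a hole extending up to the next key - so slot iteration sees every
 * position.
 */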
2590 struct bkey_s_c bch2_btree_iter_peek_slot(struct btree_iter *iter)
2591 {
2592         struct btree_trans *trans = iter->trans;
2593         struct bpos search_key;
2594         struct bkey_s_c k;
2595         int ret;
2596
2597         bch2_trans_verify_not_unlocked(trans);
2598         bch2_btree_iter_verify(iter);
2599         bch2_btree_iter_verify_entry_exit(iter);
2600         EBUG_ON(btree_iter_path(trans, iter)->level && (iter->flags & BTREE_ITER_with_key_cache));
2601
2602         /* extents can't span inode numbers: */
2603         if ((iter->flags & BTREE_ITER_is_extents) &&
2604             unlikely(iter->pos.offset == KEY_OFFSET_MAX)) {
2605                 if (iter->pos.inode == KEY_INODE_MAX)
2606                         return bkey_s_c_null;
2607
2608                 bch2_btree_iter_set_pos(iter, bpos_nosnap_successor(iter->pos));
2609         }
2610
2611         search_key = btree_iter_search_key(iter);
2612         iter->path = bch2_btree_path_set_pos(trans, iter->path, search_key,
2613                                         iter->flags & BTREE_ITER_intent,
2614                                         btree_iter_ip_allocated(iter));
2615
2616         ret = bch2_btree_path_traverse(trans, iter->path, iter->flags);
2617         if (unlikely(ret)) {
2618                 k = bkey_s_c_err(ret);
2619                 goto out_no_locked;
2620         }
2621
2622         if ((iter->flags & BTREE_ITER_cached) ||
2623             !(iter->flags & (BTREE_ITER_is_extents|BTREE_ITER_filter_snapshots))) {
2624                 k = bkey_s_c_null;
2625
2626                 if (unlikely((iter->flags & BTREE_ITER_with_updates) &&
2627                              trans->nr_updates)) {
2628                         bch2_btree_trans_peek_slot_updates(trans, iter, &k);
2629                         if (k.k)
2630                                 goto out;
2631                 }
2632
2633                 if (unlikely(iter->flags & BTREE_ITER_with_journal) &&
2634                     (k = btree_trans_peek_slot_journal(trans, iter)).k)
2635                         goto out;
2636
2637                 if (unlikely(iter->flags & BTREE_ITER_with_key_cache) &&
2638                     (k = btree_trans_peek_key_cache(iter, iter->pos)).k) {
2639                         if (!bkey_err(k))
2640                                 iter->k = *k.k;
2641                         /* We're not returning a key from iter->path: */
2642                         goto out_no_locked;
2643                 }
2644
2645                 k = bch2_btree_path_peek_slot(trans->paths + iter->path, &iter->k);
2646                 if (unlikely(!k.k))
2647                         goto out_no_locked;
2648         } else {
2649                 struct bpos next;
2650                 struct bpos end = iter->pos;
2651
2652                 if (iter->flags & BTREE_ITER_is_extents)
2653                         end.offset = U64_MAX;
2654
2655                 EBUG_ON(btree_iter_path(trans, iter)->level);
2656
2657                 if (iter->flags & BTREE_ITER_intent) {
2658                         struct btree_iter iter2;
2659
2660                         bch2_trans_copy_iter(&iter2, iter);
2661                         k = bch2_btree_iter_peek_upto(&iter2, end);
2662
2663                         if (k.k && !bkey_err(k)) {
2664                                 swap(iter->key_cache_path, iter2.key_cache_path);
2665                                 iter->k = iter2.k;
2666                                 k.k = &iter->k;
2667                         }
2668                         bch2_trans_iter_exit(trans, &iter2);
2669                 } else {
2670                         struct bpos pos = iter->pos;
2671
2672                         k = bch2_btree_iter_peek_upto(iter, end);
2673                         if (unlikely(bkey_err(k)))
2674                                 bch2_btree_iter_set_pos(iter, pos);
2675                         else
2676                                 iter->pos = pos;
2677                 }
2678
2679                 if (unlikely(bkey_err(k)))
2680                         goto out_no_locked;
2681
2682                 next = k.k ? bkey_start_pos(k.k) : POS_MAX;
2683
2684                 if (bkey_lt(iter->pos, next)) {
2685                         bkey_init(&iter->k);
2686                         iter->k.p = iter->pos;
2687
2688                         if (iter->flags & BTREE_ITER_is_extents) {
2689                                 bch2_key_resize(&iter->k,
2690                                                 min_t(u64, KEY_SIZE_MAX,
2691                                                       (next.inode == iter->pos.inode
2692                                                        ? next.offset
2693                                                        : KEY_OFFSET_MAX) -
2694                                                       iter->pos.offset));
2695                                 EBUG_ON(!iter->k.size);
2696                         }
2697
2698                         k = (struct bkey_s_c) { &iter->k, NULL };
2699                 }
2700         }
2701 out:
2702         btree_path_set_should_be_locked(trans, btree_iter_path(trans, iter));
2703 out_no_locked:
2704         bch2_btree_iter_verify_entry_exit(iter);
2705         bch2_btree_iter_verify(iter);
2706         ret = bch2_btree_iter_verify_ret(iter, k);
2707         if (unlikely(ret))
2708                 return bkey_s_c_err(ret);
2709
2710         return k;
2711 }
2712
2713 struct bkey_s_c bch2_btree_iter_next_slot(struct btree_iter *iter)
2714 {
2715         if (!bch2_btree_iter_advance(iter))
2716                 return bkey_s_c_null;
2717
2718         return bch2_btree_iter_peek_slot(iter);
2719 }
2720
2721 struct bkey_s_c bch2_btree_iter_prev_slot(struct btree_iter *iter)
2722 {
2723         if (!bch2_btree_iter_rewind(iter))
2724                 return bkey_s_c_null;
2725
2726         return bch2_btree_iter_peek_slot(iter);
2727 }
2728
2729 /* Obsolete, but still used by the Rust wrapper in -tools */
2730 struct bkey_s_c bch2_btree_iter_peek_and_restart_outlined(struct btree_iter *iter)
2731 {
2732         struct bkey_s_c k;
2733
2734         while (btree_trans_too_many_iters(iter->trans) ||
2735                (k = bch2_btree_iter_peek_type(iter, iter->flags),
2736                 bch2_err_matches(bkey_err(k), BCH_ERR_transaction_restart)))
2737                 bch2_trans_begin(iter->trans);
2738
2739         return k;
2740 }
2741
2742 /* new transactional stuff: */
2743
2744 #ifdef CONFIG_BCACHEFS_DEBUG
2745 static void btree_trans_verify_sorted_refs(struct btree_trans *trans)
2746 {
2747         struct btree_path *path;
2748         unsigned i;
2749
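        /* path idx 0 is reserved as a "no path" sentinel, hence the - 1: */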
2750         BUG_ON(trans->nr_sorted != bitmap_weight(trans->paths_allocated, trans->nr_paths) - 1);
2751
2752         trans_for_each_path(trans, path, i) {
2753                 BUG_ON(path->sorted_idx >= trans->nr_sorted);
2754                 BUG_ON(trans->sorted[path->sorted_idx] != i);
2755         }
2756
2757         for (i = 0; i < trans->nr_sorted; i++) {
2758                 unsigned idx = trans->sorted[i];
2759
2760                 BUG_ON(!test_bit(idx, trans->paths_allocated));
2761                 BUG_ON(trans->paths[idx].sorted_idx != i);
2762         }
2763 }
2764
2765 static void btree_trans_verify_sorted(struct btree_trans *trans)
2766 {
2767         struct btree_path *path, *prev = NULL;
2768         struct trans_for_each_path_inorder_iter iter;
2769
2770         if (!bch2_debug_check_iterators)
2771                 return;
2772
2773         trans_for_each_path_inorder(trans, path, iter) {
2774                 if (prev && btree_path_cmp(prev, path) > 0) {
2775                         __bch2_dump_trans_paths_updates(trans, true);
2776                         panic("trans paths out of order!\n");
2777                 }
2778                 prev = path;
2779         }
2780 }
2781 #else
2782 static inline void btree_trans_verify_sorted_refs(struct btree_trans *trans) {}
2783 static inline void btree_trans_verify_sorted(struct btree_trans *trans) {}
2784 #endif
2785
2786 void __bch2_btree_trans_sort_paths(struct btree_trans *trans)
2787 {
2788         int i, l = 0, r = trans->nr_sorted, inc = 1;
2789         bool swapped;
2790
2791         btree_trans_verify_sorted_refs(trans);
2792
2793         if (trans->paths_sorted)
2794                 goto out;
2795
2796         /*
2797          * Cocktail shaker sort: efficient here because the paths will already
2798          * be mostly sorted - bidirectional bubble sort is ~O(n) on such input.
2799          */
2800         do {
2801                 swapped = false;
2802
2803                 for (i = inc > 0 ? l : r - 2;
2804                      i + 1 < r && i >= l;
2805                      i += inc) {
2806                         if (btree_path_cmp(trans->paths + trans->sorted[i],
2807                                            trans->paths + trans->sorted[i + 1]) > 0) {
2808                                 swap(trans->sorted[i], trans->sorted[i + 1]);
2809                                 trans->paths[trans->sorted[i]].sorted_idx = i;
2810                                 trans->paths[trans->sorted[i + 1]].sorted_idx = i + 1;
2811                                 swapped = true;
2812                         }
2813                 }
2814
2815                 if (inc > 0)
2816                         --r;
2817                 else
2818                         l++;
2819                 inc = -inc;
2820         } while (swapped);
2821
2822         trans->paths_sorted = true;
2823 out:
2824         btree_trans_verify_sorted(trans);
2825 }
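/*
 * Illustrative sketch, not part of the build: the same shrinking-window,
 * alternating-direction passes as above, on a plain int array, so the
 * control flow is easier to follow. Names here are hypothetical; swap()
 * is the kernel macro from <linux/minmax.h>.
 */
#if 0
static void shaker_sort_example(int *a, int n)
{
	int i, l = 0, r = n, inc = 1;
	bool swapped;

	do {
		swapped = false;

		/* sweep forward on one pass, backward on the next: */
		for (i = inc > 0 ? l : r - 2;
		     i + 1 < r && i >= l;
		     i += inc)
			if (a[i] > a[i + 1]) {
				swap(a[i], a[i + 1]);
				swapped = true;
			}

		/* each pass pins one element at the end it swept towards: */
		if (inc > 0)
			--r;
		else
			l++;
		inc = -inc;
	} while (swapped);
}
#endif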
2826
2827 static inline void btree_path_list_remove(struct btree_trans *trans,
2828                                           struct btree_path *path)
2829 {
2830         EBUG_ON(path->sorted_idx >= trans->nr_sorted);
2831 #ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
2832         trans->nr_sorted--;
2833         memmove_u64s_down_small(trans->sorted + path->sorted_idx,
2834                                 trans->sorted + path->sorted_idx + 1,
2835                                 DIV_ROUND_UP(trans->nr_sorted - path->sorted_idx,
2836                                              sizeof(u64) / sizeof(btree_path_idx_t)));
2837 #else
2838         array_remove_item(trans->sorted, trans->nr_sorted, path->sorted_idx);
2839 #endif
2840         for (unsigned i = path->sorted_idx; i < trans->nr_sorted; i++)
2841                 trans->paths[trans->sorted[i]].sorted_idx = i;
2842 }
2843
2844 static inline void btree_path_list_add(struct btree_trans *trans,
2845                                        btree_path_idx_t pos,
2846                                        btree_path_idx_t path_idx)
2847 {
2848         struct btree_path *path = trans->paths + path_idx;
2849
2850         path->sorted_idx = pos ? trans->paths[pos].sorted_idx + 1 : trans->nr_sorted;
2851
2852 #ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
2853         memmove_u64s_up_small(trans->sorted + path->sorted_idx + 1,
2854                               trans->sorted + path->sorted_idx,
2855                               DIV_ROUND_UP(trans->nr_sorted - path->sorted_idx,
2856                                            sizeof(u64) / sizeof(btree_path_idx_t)));
2857         trans->nr_sorted++;
2858         trans->sorted[path->sorted_idx] = path_idx;
2859 #else
2860         array_insert_item(trans->sorted, trans->nr_sorted, path->sorted_idx, path_idx);
2861 #endif
2862
2863         for (unsigned i = path->sorted_idx; i < trans->nr_sorted; i++)
2864                 trans->paths[trans->sorted[i]].sorted_idx = i;
2865
2866         btree_trans_verify_sorted_refs(trans);
2867 }
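/*
 * Illustrative sketch, not part of the build: the invariant the two helpers
 * above maintain - sorted[] is a dense array of path indices ordered by
 * btree_path_cmp(), and each path records its own position so removal needs
 * no search. The memmove_u64s_*_small() variants move whole u64 units
 * (hence the DIV_ROUND_UP), presumably safe because sorted[] has fixed
 * capacity. Types and names below are simplified stand-ins.
 */
#if 0
struct ex_path { unsigned sorted_idx; };

static void ex_list_remove(struct ex_path *paths, u16 *sorted,
			   unsigned *nr, unsigned pos)
{
	memmove(sorted + pos, sorted + pos + 1,
		(--(*nr) - pos) * sizeof(*sorted));

	/* everything after the hole shifted down by one slot: */
	for (unsigned i = pos; i < *nr; i++)
		paths[sorted[i]].sorted_idx = i;
}
#endif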
2868
2869 void bch2_trans_iter_exit(struct btree_trans *trans, struct btree_iter *iter)
2870 {
2871         if (iter->update_path)
2872                 bch2_path_put_nokeep(trans, iter->update_path,
2873                               iter->flags & BTREE_ITER_intent);
2874         if (iter->path)
2875                 bch2_path_put(trans, iter->path,
2876                               iter->flags & BTREE_ITER_intent);
2877         if (iter->key_cache_path)
2878                 bch2_path_put(trans, iter->key_cache_path,
2879                               iter->flags & BTREE_ITER_intent);
2880         iter->path              = 0;
2881         iter->update_path       = 0;
2882         iter->key_cache_path    = 0;
2883         iter->trans             = NULL;
2884 }
2885
2886 void bch2_trans_iter_init_outlined(struct btree_trans *trans,
2887                           struct btree_iter *iter,
2888                           enum btree_id btree_id, struct bpos pos,
2889                           unsigned flags)
2890 {
2891         bch2_trans_iter_init_common(trans, iter, btree_id, pos, 0, 0,
2892                                bch2_btree_iter_flags(trans, btree_id, flags),
2893                                _RET_IP_);
2894 }
2895
2896 void bch2_trans_node_iter_init(struct btree_trans *trans,
2897                                struct btree_iter *iter,
2898                                enum btree_id btree_id,
2899                                struct bpos pos,
2900                                unsigned locks_want,
2901                                unsigned depth,
2902                                unsigned flags)
2903 {
2904         flags |= BTREE_ITER_not_extents;
2905         flags |= BTREE_ITER_snapshot_field;
2906         flags |= BTREE_ITER_all_snapshots;
2907
2908         bch2_trans_iter_init_common(trans, iter, btree_id, pos, locks_want, depth,
2909                                __bch2_btree_iter_flags(trans, btree_id, flags),
2910                                _RET_IP_);
2911
2912         iter->min_depth = depth;
2913
2914         struct btree_path *path = btree_iter_path(trans, iter);
2915         BUG_ON(path->locks_want  < min(locks_want, BTREE_MAX_DEPTH));
2916         BUG_ON(path->level      != depth);
2917         BUG_ON(iter->min_depth  != depth);
2918 }
2919
2920 void bch2_trans_copy_iter(struct btree_iter *dst, struct btree_iter *src)
2921 {
2922         struct btree_trans *trans = src->trans;
2923
2924         *dst = *src;
2925 #ifdef TRACK_PATH_ALLOCATED
2926         dst->ip_allocated = _RET_IP_;
2927 #endif
2928         if (src->path)
2929                 __btree_path_get(trans, trans->paths + src->path, src->flags & BTREE_ITER_intent);
2930         if (src->update_path)
2931                 __btree_path_get(trans, trans->paths + src->update_path, src->flags & BTREE_ITER_intent);
2932         dst->key_cache_path = 0;
2933 }
2934
2935 void *__bch2_trans_kmalloc(struct btree_trans *trans, size_t size)
2936 {
2937         struct bch_fs *c = trans->c;
2938         unsigned new_top = trans->mem_top + size;
2939         unsigned old_bytes = trans->mem_bytes;
2940         unsigned new_bytes = roundup_pow_of_two(new_top);
2941         int ret;
2942         void *new_mem;
2943         void *p;
2944
2945         WARN_ON_ONCE(new_bytes > BTREE_TRANS_MEM_MAX);
2946
2947         struct btree_transaction_stats *s = btree_trans_stats(trans);
2948         s->max_mem = max(s->max_mem, new_bytes);
2949
2950         if (trans->used_mempool) {
2951                 if (trans->mem_bytes >= new_bytes)
2952                         goto out_change_top;
2953
2954                 /* No more space in the mempool buffer; allocate a new one */
2955                 new_mem = kmalloc(new_bytes, GFP_NOWAIT|__GFP_NOWARN);
2956                 if (unlikely(!new_mem)) {
2957                         bch2_trans_unlock(trans);
2958
2959                         new_mem = kmalloc(new_bytes, GFP_KERNEL);
2960                         if (!new_mem)
2961                                 return ERR_PTR(-BCH_ERR_ENOMEM_trans_kmalloc);
2962
2963                         ret = bch2_trans_relock(trans);
2964                         if (ret) {
2965                                 kfree(new_mem);
2966                                 return ERR_PTR(ret);
2967                         }
2968                 }
2969                 memcpy(new_mem, trans->mem, trans->mem_top);
2970                 trans->used_mempool = false;
2971                 mempool_free(trans->mem, &c->btree_trans_mem_pool);
2972                 goto out_new_mem;
2973         }
2974
2975         new_mem = krealloc(trans->mem, new_bytes, GFP_NOWAIT|__GFP_NOWARN);
2976         if (unlikely(!new_mem)) {
2977                 bch2_trans_unlock(trans);
2978
2979                 new_mem = krealloc(trans->mem, new_bytes, GFP_KERNEL);
2980                 if (!new_mem && new_bytes <= BTREE_TRANS_MEM_MAX) {
2981                         new_mem = mempool_alloc(&c->btree_trans_mem_pool, GFP_KERNEL);
2982                         new_bytes = BTREE_TRANS_MEM_MAX;
2983                         memcpy(new_mem, trans->mem, trans->mem_top);
2984                         trans->used_mempool = true;
2985                         kfree(trans->mem);
2986                 }
2987
2988                 if (!new_mem)
2989                         return ERR_PTR(-BCH_ERR_ENOMEM_trans_kmalloc);
2990
2991                 trans->mem = new_mem;
2992                 trans->mem_bytes = new_bytes;
2993
2994                 ret = bch2_trans_relock(trans);
2995                 if (ret)
2996                         return ERR_PTR(ret);
2997         }
2998 out_new_mem:
2999         trans->mem = new_mem;
3000         trans->mem_bytes = new_bytes;
3001
3002         if (old_bytes) {
3003                 trace_and_count(c, trans_restart_mem_realloced, trans, _RET_IP_, new_bytes);
3004                 return ERR_PTR(btree_trans_restart(trans, BCH_ERR_transaction_restart_mem_realloced));
3005         }
3006 out_change_top:
3007         p = trans->mem + trans->mem_top;
3008         trans->mem_top += size;
3009         memset(p, 0, size);
3010         return p;
3011 }
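/*
 * Usage sketch, not part of the build: trans memory is a bump allocator
 * that is reset by bch2_trans_begin(), so pointers are only valid within
 * one transaction attempt; growing the buffer restarts the transaction.
 * Callers normally go through the bch2_trans_kmalloc() inline wrapper, as
 * assumed here.
 */
#if 0
static int ex_alloc_key(struct btree_trans *trans, struct bkey_i **k)
{
	*k = bch2_trans_kmalloc(trans, sizeof(**k));

	/* may be -BCH_ERR_transaction_restart_mem_realloced: */
	return PTR_ERR_OR_ZERO(*k);
}
#endif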
3012
3013 static inline void check_srcu_held_too_long(struct btree_trans *trans)
3014 {
3015         WARN(trans->srcu_held && time_after(jiffies, trans->srcu_lock_time + HZ * 10),
3016              "btree trans held srcu lock (delaying memory reclaim) for %lu seconds",
3017              (jiffies - trans->srcu_lock_time) / HZ);
3018 }
3019
3020 void bch2_trans_srcu_unlock(struct btree_trans *trans)
3021 {
3022         if (trans->srcu_held) {
3023                 struct bch_fs *c = trans->c;
3024                 struct btree_path *path;
3025                 unsigned i;
3026
3027                 trans_for_each_path(trans, path, i)
3028                         if (path->cached && !btree_node_locked(path, 0))
3029                                 path->l[0].b = ERR_PTR(-BCH_ERR_no_btree_node_srcu_reset);
3030
3031                 check_srcu_held_too_long(trans);
3032                 srcu_read_unlock(&c->btree_trans_barrier, trans->srcu_idx);
3033                 trans->srcu_held = false;
3034         }
3035 }
3036
3037 static void bch2_trans_srcu_lock(struct btree_trans *trans)
3038 {
3039         if (!trans->srcu_held) {
3040                 trans->srcu_idx = srcu_read_lock(&trans->c->btree_trans_barrier);
3041                 trans->srcu_lock_time   = jiffies;
3042                 trans->srcu_held = true;
3043         }
3044 }
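/*
 * Illustrative sketch, not part of the build: the srcu_held/srcu_idx pair
 * above implements the standard SRCU read-side pattern - srcu_read_lock()
 * returns a cookie that must be passed back to srcu_read_unlock():
 */
#if 0
static void ex_srcu_read_section(struct srcu_struct *ss)
{
	int idx = srcu_read_lock(ss);	/* pins the current grace period */

	/* ... access SRCU-protected state ... */

	srcu_read_unlock(ss, idx);	/* lets reclaim make progress again */
}
#endif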
3045
3046 /**
3047  * bch2_trans_begin() - reset a transaction after an interrupted attempt
3048  * @trans: transaction to reset
3049  *
3050  * Returns:     current restart counter, to be used with trans_was_restarted()
3051  *
3052  * While iterating over or updating nodes, an attempt to lock a btree node
3053  * may return BCH_ERR_transaction_restart when the trylock fails. When this
3054  * occurs, bch2_trans_begin() should be called and the transaction retried.
3055  */
3056 u32 bch2_trans_begin(struct btree_trans *trans)
3057 {
3058         struct btree_path *path;
3059         unsigned i;
3060         u64 now;
3061
3062         bch2_trans_reset_updates(trans);
3063
3064         trans->restart_count++;
3065         trans->mem_top                  = 0;
3066         trans->journal_entries          = NULL;
3067
3068         trans_for_each_path(trans, path, i) {
3069                 path->should_be_locked = false;
3070
3071                 /*
3072                  * If the transaction wasn't restarted, we're presuming to be
3073                  * doing something new: don't keep iterators except the ones that
3074                  * are in use - except for the subvolumes btree:
3075                  */
3076                 if (!trans->restarted && path->btree_id != BTREE_ID_subvolumes)
3077                         path->preserve = false;
3078
3079                 /*
3080                  * XXX: we probably shouldn't be doing this if the transaction
3081                  * was restarted, but currently we still overflow transaction
3082                  * iterators if we do that
3083                  */
3084                 if (!path->ref && !path->preserve)
3085                         __bch2_path_free(trans, i);
3086                 else
3087                         path->preserve = false;
3088         }
3089
3090         now = local_clock();
3091
3092         if (!IS_ENABLED(CONFIG_BCACHEFS_NO_LATENCY_ACCT) &&
3093             time_after64(now, trans->last_begin_time + 10))
3094                 __bch2_time_stats_update(&btree_trans_stats(trans)->duration,
3095                                          trans->last_begin_time, now);
3096
3097         if (!trans->restarted &&
3098             (need_resched() ||
3099              time_after64(now, trans->last_begin_time + BTREE_TRANS_MAX_LOCK_HOLD_TIME_NS))) {
3100                 bch2_trans_unlock(trans);
3101                 cond_resched();
3102                 now = local_clock();
3103         }
3104         trans->last_begin_time = now;
3105
3106         if (unlikely(trans->srcu_held &&
3107                      time_after(jiffies, trans->srcu_lock_time + msecs_to_jiffies(10))))
3108                 bch2_trans_srcu_unlock(trans);
3109
3110         trans->last_begin_ip = _RET_IP_;
3111
3112         trans_set_locked(trans);
3113
3114         if (trans->restarted) {
3115                 bch2_btree_path_traverse_all(trans);
3116                 trans->notrace_relock_fail = false;
3117         }
3118
3119         bch2_trans_verify_not_unlocked(trans);
3120         return trans->restart_count;
3121 }
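/*
 * Usage sketch, not part of the build: the retry loop described by the
 * comment above, written out by hand with functions from this file (real
 * callers typically use the convenience macros in btree_iter.h). The btree
 * id and position are caller-supplied; error handling is abbreviated.
 */
#if 0
static int ex_lookup(struct bch_fs *c, enum btree_id btree, struct bpos pos)
{
	struct btree_trans *trans = bch2_trans_get(c);
	struct btree_iter iter;
	struct bkey_s_c k;
	int ret;
retry:
	bch2_trans_begin(trans);

	bch2_trans_iter_init_outlined(trans, &iter, btree, pos, 0);
	k = bch2_btree_iter_peek_slot(&iter);
	ret = bkey_err(k);
	bch2_trans_iter_exit(trans, &iter);

	if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
		goto retry;

	bch2_trans_put(trans);
	return ret;
}
#endif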
3122
3123 const char *bch2_btree_transaction_fns[BCH_TRANSACTIONS_NR] = { "(unknown)" };
3124
3125 unsigned bch2_trans_get_fn_idx(const char *fn)
3126 {
3127         for (unsigned i = 0; i < ARRAY_SIZE(bch2_btree_transaction_fns); i++)
3128                 if (!bch2_btree_transaction_fns[i] ||
3129                     bch2_btree_transaction_fns[i] == fn) {
3130                         bch2_btree_transaction_fns[i] = fn;
3131                         return i;
3132                 }
3133
3134         pr_warn_once("BCH_TRANSACTIONS_NR not big enough!");
3135         return 0;
3136 }
3137
3138 struct btree_trans *__bch2_trans_get(struct bch_fs *c, unsigned fn_idx)
3139         __acquires(&c->btree_trans_barrier)
3140 {
3141         struct btree_trans *trans;
3142
3143         if (IS_ENABLED(__KERNEL__)) {
3144                 trans = this_cpu_xchg(c->btree_trans_bufs->trans, NULL);
3145                 if (trans) {
3146                         memset(trans, 0, offsetof(struct btree_trans, list));
3147                         goto got_trans;
3148                 }
3149         }
3150
3151         trans = mempool_alloc(&c->btree_trans_pool, GFP_NOFS);
3152         memset(trans, 0, sizeof(*trans));
3153
3154         seqmutex_lock(&c->btree_trans_lock);
3155         if (IS_ENABLED(CONFIG_BCACHEFS_DEBUG)) {
3156                 struct btree_trans *pos;
3157                 pid_t pid = current->pid;
3158
3159                 trans->locking_wait.task = current;
3160
3161                 list_for_each_entry(pos, &c->btree_trans_list, list) {
3162                         struct task_struct *pos_task = READ_ONCE(pos->locking_wait.task);
3163                         /*
3164                          * We'd much prefer to be stricter here and completely
3165                          * disallow multiple btree_trans in the same thread -
3166                          * but the data move path calls bch2_write when we
3167                          * already have a btree_trans initialized.
3168                          */
3169                         BUG_ON(pos_task &&
3170                                pid == pos_task->pid &&
3171                                pos->locked);
3172                 }
3173         }
3174
3175         list_add(&trans->list, &c->btree_trans_list);
3176         seqmutex_unlock(&c->btree_trans_lock);
3177 got_trans:
3178         trans->c                = c;
3179         trans->last_begin_time  = local_clock();
3180         trans->fn_idx           = fn_idx;
3181         trans->locking_wait.task = current;
3182         trans->journal_replay_not_finished =
3183                 unlikely(!test_bit(JOURNAL_replay_done, &c->journal.flags)) &&
3184                 atomic_inc_not_zero(&c->journal_keys.ref);
3185         trans->nr_paths         = ARRAY_SIZE(trans->_paths);
3186         trans->paths_allocated  = trans->_paths_allocated;
3187         trans->sorted           = trans->_sorted;
3188         trans->paths            = trans->_paths;
3189         trans->updates          = trans->_updates;
3190
3191         *trans_paths_nr(trans->paths) = BTREE_ITER_INITIAL;
3192
3193         trans->paths_allocated[0] = 1;
3194
3195         static struct lock_class_key lockdep_key;
3196         lockdep_init_map(&trans->dep_map, "bcachefs_btree", &lockdep_key, 0);
3197
3198         if (fn_idx < BCH_TRANSACTIONS_NR) {
3199                 trans->fn = bch2_btree_transaction_fns[fn_idx];
3200
3201                 struct btree_transaction_stats *s = &c->btree_transaction_stats[fn_idx];
3202
3203                 if (s->max_mem) {
3204                         unsigned expected_mem_bytes = roundup_pow_of_two(s->max_mem);
3205
3206                         trans->mem = kmalloc(expected_mem_bytes, GFP_KERNEL);
3207                         if (likely(trans->mem))
3208                                 trans->mem_bytes = expected_mem_bytes;
3209                 }
3210
3211                 trans->nr_paths_max = s->nr_max_paths;
3212                 trans->journal_entries_size = s->journal_entries_size;
3213         }
3214
3215         trans->srcu_idx         = srcu_read_lock(&c->btree_trans_barrier);
3216         trans->srcu_lock_time   = jiffies;
3217         trans->srcu_held        = true;
3218         trans_set_locked(trans);
3219
3220         closure_init_stack_release(&trans->ref);
3221         return trans;
3222 }
3223
3224 static void check_btree_paths_leaked(struct btree_trans *trans)
3225 {
3226 #ifdef CONFIG_BCACHEFS_DEBUG
3227         struct bch_fs *c = trans->c;
3228         struct btree_path *path;
3229         unsigned i;
3230
3231         trans_for_each_path(trans, path, i)
3232                 if (path->ref)
3233                         goto leaked;
3234         return;
3235 leaked:
3236         bch_err(c, "btree paths leaked from %s!", trans->fn);
3237         trans_for_each_path(trans, path, i)
3238                 if (path->ref)
3239                         printk(KERN_ERR "  btree %s %pS\n",
3240                                bch2_btree_id_str(path->btree_id),
3241                                (void *) path->ip_allocated);
3242         /* Be noisy about this: */
3243         bch2_fatal_error(c);
3244 #endif
3245 }
3246
3247 void bch2_trans_put(struct btree_trans *trans)
3248         __releases(&c->btree_trans_barrier)
3249 {
3250         struct bch_fs *c = trans->c;
3251
3252         bch2_trans_unlock(trans);
3253
3254         trans_for_each_update(trans, i)
3255                 __btree_path_put(trans, trans->paths + i->path, true);
3256         trans->nr_updates       = 0;
3257
3258         check_btree_paths_leaked(trans);
3259
3260         if (trans->srcu_held) {
3261                 check_srcu_held_too_long(trans);
3262                 srcu_read_unlock(&c->btree_trans_barrier, trans->srcu_idx);
3263         }
3264
3265         if (unlikely(trans->journal_replay_not_finished))
3266                 bch2_journal_keys_put(c);
3267
3268         /*
3269          * trans->ref protects trans->locking_wait.task and the btree paths
3270          * array; both are used by the deadlock cycle detector
3271          */
3272         closure_return_sync(&trans->ref);
3273         trans->locking_wait.task = NULL;
3274
3275         unsigned long *paths_allocated = trans->paths_allocated;
3276         trans->paths_allocated  = NULL;
3277         trans->paths            = NULL;
3278
3279         if (paths_allocated != trans->_paths_allocated)
3280                 kvfree_rcu_mightsleep(paths_allocated);
3281
3282         if (trans->used_mempool)
3283                 mempool_free(trans->mem, &c->btree_trans_mem_pool);
3284         else
3285                 kfree(trans->mem);
3286
3287         /* Userspace doesn't have a real percpu implementation: */
3288         if (IS_ENABLED(__KERNEL__))
3289                 trans = this_cpu_xchg(c->btree_trans_bufs->trans, trans);
3290
3291         if (trans) {
3292                 seqmutex_lock(&c->btree_trans_lock);
3293                 list_del(&trans->list);
3294                 seqmutex_unlock(&c->btree_trans_lock);
3295
3296                 mempool_free(trans, &c->btree_trans_pool);
3297         }
3298 }
3299
3300 bool bch2_current_has_btree_trans(struct bch_fs *c)
3301 {
3302         seqmutex_lock(&c->btree_trans_lock);
3303         struct btree_trans *trans;
3304         bool ret = false;
3305         list_for_each_entry(trans, &c->btree_trans_list, list)
3306                 if (trans->locking_wait.task == current &&
3307                     trans->locked) {
3308                         ret = true;
3309                         break;
3310                 }
3311         seqmutex_unlock(&c->btree_trans_lock);
3312         return ret;
3313 }
3314
3315 static void __maybe_unused
3316 bch2_btree_bkey_cached_common_to_text(struct printbuf *out,
3317                                       struct btree_bkey_cached_common *b)
3318 {
3319         struct six_lock_count c = six_lock_counts(&b->lock);
3320         struct task_struct *owner;
3321         pid_t pid;
3322
3323         rcu_read_lock();
3324         owner = READ_ONCE(b->lock.owner);
3325         pid = owner ? owner->pid : 0;
3326         rcu_read_unlock();
3327
3328         prt_printf(out, "\t%px %c l=%u %s:", b, b->cached ? 'c' : 'b',
3329                    b->level, bch2_btree_id_str(b->btree_id));
3330         bch2_bpos_to_text(out, btree_node_pos(b));
3331
3332         prt_printf(out, "\t locks %u:%u:%u held by pid %u",
3333                    c.n[0], c.n[1], c.n[2], pid);
3334 }
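/*
 * Usage sketch, not part of the build, assuming the PRINTBUF initializer
 * and printbuf_exit() from printbuf.h: to_text() helpers like the one above
 * only append to a printbuf; the caller emits and frees it:
 */
#if 0
static void ex_dump_lock(struct bch_fs *c, struct btree_bkey_cached_common *b)
{
	struct printbuf buf = PRINTBUF;

	bch2_btree_bkey_cached_common_to_text(&buf, b);
	bch_err(c, "%s", buf.buf);
	printbuf_exit(&buf);
}
#endif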
3335
3336 void bch2_btree_trans_to_text(struct printbuf *out, struct btree_trans *trans)
3337 {
3338         struct btree_bkey_cached_common *b;
3339         static char lock_types[] = { 'r', 'i', 'w' };
3340         struct task_struct *task = READ_ONCE(trans->locking_wait.task);
3341         unsigned l, idx;
3342
3343         /* before rcu_read_lock(): */
3344         bch2_printbuf_make_room(out, 4096);
3345
3346         if (!out->nr_tabstops) {
3347                 printbuf_tabstop_push(out, 16);
3348                 printbuf_tabstop_push(out, 32);
3349         }
3350
3351         prt_printf(out, "%i %s\n", task ? task->pid : 0, trans->fn);
3352
3353         /* trans->paths is rcu protected vs. freeing */
3354         rcu_read_lock();
3355         out->atomic++;
3356
3357         struct btree_path *paths = rcu_dereference(trans->paths);
3358         if (!paths)
3359                 goto out;
3360
3361         unsigned long *paths_allocated = trans_paths_allocated(paths);
3362
3363         trans_for_each_path_idx_from(paths_allocated, *trans_paths_nr(paths), idx, 1) {
3364                 struct btree_path *path = paths + idx;
3365                 if (!path->nodes_locked)
3366                         continue;
3367
3368                 prt_printf(out, "  path %u %c l=%u %s:",
3369                        idx,
3370                        path->cached ? 'c' : 'b',
3371                        path->level,
3372                        bch2_btree_id_str(path->btree_id));
3373                 bch2_bpos_to_text(out, path->pos);
3374                 prt_newline(out);
3375
3376                 for (l = 0; l < BTREE_MAX_DEPTH; l++) {
3377                         if (btree_node_locked(path, l) &&
3378                             !IS_ERR_OR_NULL(b = (void *) READ_ONCE(path->l[l].b))) {
3379                                 prt_printf(out, "    %c l=%u ",
3380                                            lock_types[btree_node_locked_type(path, l)], l);
3381                                 bch2_btree_bkey_cached_common_to_text(out, b);
3382                                 prt_newline(out);
3383                         }
3384                 }
3385         }
3386
3387         b = READ_ONCE(trans->locking);
3388         if (b) {
3389                 prt_printf(out, "  blocked for %lluus on\n",
3390                            div_u64(local_clock() - trans->locking_wait.start_time, 1000));
3391                 prt_printf(out, "    %c", lock_types[trans->locking_wait.lock_want]);
3392                 bch2_btree_bkey_cached_common_to_text(out, b);
3393                 prt_newline(out);
3394         }
3395 out:
3396         --out->atomic;
3397         rcu_read_unlock();
3398 }
3399
3400 void bch2_fs_btree_iter_exit(struct bch_fs *c)
3401 {
3402         struct btree_transaction_stats *s;
3403         struct btree_trans *trans;
3404         int cpu;
3405
3406         if (c->btree_trans_bufs)
3407                 for_each_possible_cpu(cpu) {
3408                         struct btree_trans *trans =
3409                                 per_cpu_ptr(c->btree_trans_bufs, cpu)->trans;
3410
3411                         if (trans) {
3412                                 seqmutex_lock(&c->btree_trans_lock);
3413                                 list_del(&trans->list);
3414                                 seqmutex_unlock(&c->btree_trans_lock);
3415                         }
3416                         kfree(trans);
3417                 }
3418         free_percpu(c->btree_trans_bufs);
3419
3420         trans = list_first_entry_or_null(&c->btree_trans_list, struct btree_trans, list);
3421         if (trans)
3422                 panic("%s leaked btree_trans\n", trans->fn);
3423
3424         for (s = c->btree_transaction_stats;
3425              s < c->btree_transaction_stats + ARRAY_SIZE(c->btree_transaction_stats);
3426              s++) {
3427                 kfree(s->max_paths_text);
3428                 bch2_time_stats_exit(&s->lock_hold_times);
3429         }
3430
3431         if (c->btree_trans_barrier_initialized) {
3432                 synchronize_srcu_expedited(&c->btree_trans_barrier);
3433                 cleanup_srcu_struct(&c->btree_trans_barrier);
3434         }
3435         mempool_exit(&c->btree_trans_mem_pool);
3436         mempool_exit(&c->btree_trans_pool);
3437 }
3438
3439 void bch2_fs_btree_iter_init_early(struct bch_fs *c)
3440 {
3441         struct btree_transaction_stats *s;
3442
3443         for (s = c->btree_transaction_stats;
3444              s < c->btree_transaction_stats + ARRAY_SIZE(c->btree_transaction_stats);
3445              s++) {
3446                 bch2_time_stats_init(&s->duration);
3447                 bch2_time_stats_init(&s->lock_hold_times);
3448                 mutex_init(&s->lock);
3449         }
3450
3451         INIT_LIST_HEAD(&c->btree_trans_list);
3452         seqmutex_init(&c->btree_trans_lock);
3453 }
3454
3455 int bch2_fs_btree_iter_init(struct bch_fs *c)
3456 {
3457         int ret;
3458
3459         c->btree_trans_bufs = alloc_percpu(struct btree_trans_buf);
3460         if (!c->btree_trans_bufs)
3461                 return -ENOMEM;
3462
3463         ret   = mempool_init_kmalloc_pool(&c->btree_trans_pool, 1,
3464                                           sizeof(struct btree_trans)) ?:
3465                 mempool_init_kmalloc_pool(&c->btree_trans_mem_pool, 1,
3466                                           BTREE_TRANS_MEM_MAX) ?:
3467                 init_srcu_struct(&c->btree_trans_barrier);
3468         if (ret)
3469                 return ret;
3470
3471         /*
3472          * static annotation (hackily done) for lock ordering of reclaim vs.
3473          * btree node locks:
3474          */
3475 #ifdef CONFIG_LOCKDEP
3476         fs_reclaim_acquire(GFP_KERNEL);
3477         struct btree_trans *trans = bch2_trans_get(c);
3478         trans_set_locked(trans);
3479         bch2_trans_put(trans);
3480         fs_reclaim_release(GFP_KERNEL);
3481 #endif
3482
3483         c->btree_trans_barrier_initialized = true;
3484         return 0;
3486 }
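/*
 * Note on the init chain above: `a ?: b` is the GNU conditional-with-
 * omitted-operand extension - it evaluates to a if a is nonzero, else b,
 * evaluating a only once. The chain therefore runs each init step until
 * the first one returns an error. Sketch with hypothetical step functions:
 */
#if 0
static int ex_init_chain(void)
{
	return ex_step_one() ?:		/* stops at first nonzero return */
	       ex_step_two() ?:
	       ex_step_three();
}
#endif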