// SPDX-License-Identifier: GPL-2.0

#include "bcachefs.h"
#include "btree_iter.h"
#include "eytzinger.h"
#include "journal_seq_blacklist.h"
#include "super-io.h"

/*
 * journal_seq_blacklist machinery:
 *
 * To guarantee the ordering of btree updates after a crash, we need to detect
 * when a btree node entry (bset) is newer than the newest journal entry that
 * was successfully written, and ignore it - effectively ignoring any btree
 * updates that didn't make it into the journal.
 *
 * If we didn't do this, we might have two btree nodes, a and b, both with
 * updates that weren't written to the journal yet: if b was updated after a,
 * but b was flushed and a was not - oops; on recovery we'll find that the
 * updates to b happened, but not the updates to a that preceded them.
 *
 * Ignoring bsets that are newer than the newest journal entry is always safe,
 * because everything they contain will also have been journalled - and must
 * still be present in the journal on disk until a journal entry has been
 * written _after_ that bset was written.
 *
 * To accomplish this, bsets record the newest journal sequence number they
 * contain updates for; then, on startup, the btree code queries the journal
 * code: "is this sequence number newer than the newest journal entry?" If so,
 * the bset is ignored.
 *
 * When this happens, we must blacklist that journal sequence number: the
 * journal must not write any entries with that sequence number, and it must
 * record that the number was blacklisted so that a) on recovery we don't
 * think we have missing journal entries, and b) the btree code continues to
 * ignore that bset until the btree node is rewritten.
 */
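
/*
 * A concrete (illustrative) example: suppose the newest journal entry to
 * reach disk has sequence number 100, but a flushed btree node contains a
 * bset stamped with sequence number 102. On recovery that bset must be
 * ignored and the unwritten sequence numbers blacklisted - otherwise we could
 * apply updates from seq 102 while updates journalled at seq 101, which
 * logically preceded them, were lost with the unwritten journal.
 */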

static unsigned sb_blacklist_u64s(unsigned nr)
{
        struct bch_sb_field_journal_seq_blacklist *bl;

        return (sizeof(*bl) + sizeof(bl->start[0]) * nr) / sizeof(u64);
}
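
/*
 * Sanity check of the arithmetic (sizes here are illustrative assumptions:
 * an 8-byte superblock field header and 16-byte entries, i.e. two __le64s
 * each): sb_blacklist_u64s(3) = (8 + 3 * 16) / 8 = 7 u64s.
 */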

int bch2_journal_seq_blacklist_add(struct bch_fs *c, u64 start, u64 end)
{
        struct bch_sb_field_journal_seq_blacklist *bl;
        unsigned i = 0, nr;
        int ret = 0;

        mutex_lock(&c->sb_lock);
        bl = bch2_sb_field_get(c->disk_sb.sb, journal_seq_blacklist);
        nr = blacklist_nr_entries(bl);

        while (i < nr) {
                struct journal_seq_blacklist_entry *e =
                        bl->start + i;

                if (end < le64_to_cpu(e->start))
                        break;

                if (start > le64_to_cpu(e->end)) {
                        i++;
                        continue;
                }

                /*
                 * Entry is contiguous or overlapping with the new entry:
                 * merge it into the new entry, then delete it:
                 */

                start   = min(start,    le64_to_cpu(e->start));
                end     = max(end,      le64_to_cpu(e->end));
                array_remove_item(bl->start, nr, i);
        }

        bl = bch2_sb_field_resize(&c->disk_sb, journal_seq_blacklist,
                                  sb_blacklist_u64s(nr + 1));
        if (!bl) {
                ret = -BCH_ERR_ENOSPC_sb_journal_seq_blacklist;
                goto out;
        }

        array_insert_item(bl->start, nr, i, ((struct journal_seq_blacklist_entry) {
                .start  = cpu_to_le64(start),
                .end    = cpu_to_le64(end),
        }));
        c->disk_sb.sb->features[0] |= cpu_to_le64(1ULL << BCH_FEATURE_journal_seq_blacklist_v3);

        ret = bch2_write_super(c);
out:
        mutex_unlock(&c->sb_lock);

        return ret ?: bch2_blacklist_table_initialize(c);
}
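
/*
 * Illustrative call site (a sketch, not taken verbatim from the recovery
 * code; the local names are hypothetical): when recovery finds a bset stamped
 * past the newest written journal entry, it can blacklist the half-open range
 * of unwritten sequence numbers:
 *
 *      ret = bch2_journal_seq_blacklist_add(c, newest_journal_seq + 1,
 *                                           bset_journal_seq + 1);
 *
 * Note that .end is exclusive, and contiguous or overlapping ranges are
 * merged into a single entry above.
 */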

static int journal_seq_blacklist_table_cmp(const void *_l, const void *_r)
{
        const struct journal_seq_blacklist_table_entry *l = _l;
        const struct journal_seq_blacklist_table_entry *r = _r;

        return cmp_int(l->start, r->start);
}

bool bch2_journal_seq_is_blacklisted(struct bch_fs *c, u64 seq,
                                     bool dirty)
{
        struct journal_seq_blacklist_table *t = c->journal_seq_blacklist_table;
        struct journal_seq_blacklist_table_entry search = { .start = seq };
        int idx;

        if (!t)
                return false;

        idx = eytzinger0_find_le(t->entries, t->nr,
                                 sizeof(t->entries[0]),
                                 journal_seq_blacklist_table_cmp,
                                 &search);
        if (idx < 0)
                return false;

        BUG_ON(t->entries[idx].start > seq);

        if (seq >= t->entries[idx].end)
                return false;

        if (dirty)
                t->entries[idx].dirty = true;
        return true;
}
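
/*
 * Typical use (a sketch; the caller's locals are illustrative): the btree
 * node read path checks each bset's sequence number, passing dirty = true so
 * the GC below knows the entry is still referenced by some node:
 *
 *      if (bch2_journal_seq_is_blacklisted(c, le64_to_cpu(i->journal_seq),
 *                                          true))
 *              ... drop this bset: its updates never reached the journal ...
 *
 * The lookup is a binary search over the eytzinger-ordered table: find the
 * last entry with start <= seq, then check seq against that entry's
 * (exclusive) end.
 */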

int bch2_blacklist_table_initialize(struct bch_fs *c)
{
        struct bch_sb_field_journal_seq_blacklist *bl =
                bch2_sb_field_get(c->disk_sb.sb, journal_seq_blacklist);
        struct journal_seq_blacklist_table *t;
        unsigned i, nr = blacklist_nr_entries(bl);

        if (!bl)
                return 0;

        t = kzalloc(struct_size(t, entries, nr), GFP_KERNEL);
        if (!t)
                return -BCH_ERR_ENOMEM_blacklist_table_init;

        t->nr = nr;

        for (i = 0; i < nr; i++) {
                t->entries[i].start     = le64_to_cpu(bl->start[i].start);
                t->entries[i].end       = le64_to_cpu(bl->start[i].end);
        }

        eytzinger0_sort(t->entries,
                        t->nr,
                        sizeof(t->entries[0]),
                        journal_seq_blacklist_table_cmp,
                        NULL);

        kfree(c->journal_seq_blacklist_table);
        c->journal_seq_blacklist_table = t;
        return 0;
}
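
/*
 * An eytzinger (BFS) layout stores a balanced search tree in a flat array:
 * for seven sorted starts s1 < ... < s7, the order would be
 * s4 s2 s6 s1 s3 s5 s7 (root first, then each level left to right), which
 * keeps the binary search in bch2_journal_seq_is_blacklisted()
 * cache-friendly.
 */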

static int bch2_sb_journal_seq_blacklist_validate(struct bch_sb *sb,
                                                  struct bch_sb_field *f,
                                                  struct printbuf *err)
{
        struct bch_sb_field_journal_seq_blacklist *bl =
                field_to_type(f, journal_seq_blacklist);
        unsigned i, nr = blacklist_nr_entries(bl);

        for (i = 0; i < nr; i++) {
                struct journal_seq_blacklist_entry *e = bl->start + i;

                if (le64_to_cpu(e->start) >=
                    le64_to_cpu(e->end)) {
                        prt_printf(err, "entry %u start >= end (%llu >= %llu)",
                               i, le64_to_cpu(e->start), le64_to_cpu(e->end));
                        return -BCH_ERR_invalid_sb_journal_seq_blacklist;
                }

                if (i + 1 < nr &&
                    le64_to_cpu(e[0].end) >
                    le64_to_cpu(e[1].start)) {
                        prt_printf(err, "entry %u out of order with next entry (%llu > %llu)",
                               i + 1, le64_to_cpu(e[0].end), le64_to_cpu(e[1].start));
                        return -BCH_ERR_invalid_sb_journal_seq_blacklist;
                }
        }

        return 0;
}

static void bch2_sb_journal_seq_blacklist_to_text(struct printbuf *out,
                                                  struct bch_sb *sb,
                                                  struct bch_sb_field *f)
{
        struct bch_sb_field_journal_seq_blacklist *bl =
                field_to_type(f, journal_seq_blacklist);
        struct journal_seq_blacklist_entry *i;
        unsigned nr = blacklist_nr_entries(bl);

        for (i = bl->start; i < bl->start + nr; i++) {
                if (i != bl->start)
                        prt_printf(out, " ");

                prt_printf(out, "%llu-%llu",
                       le64_to_cpu(i->start),
                       le64_to_cpu(i->end));
        }
        prt_newline(out);
}

const struct bch_sb_field_ops bch_sb_field_ops_journal_seq_blacklist = {
        .validate       = bch2_sb_journal_seq_blacklist_validate,
        .to_text        = bch2_sb_journal_seq_blacklist_to_text
};

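/*
 * Garbage collect superblock blacklist entries that are no longer needed:
 * walk every node of every btree (node reads mark the blacklist entries they
 * hit as dirty via bch2_journal_seq_is_blacklisted()), then keep only the
 * dirty entries - anything no btree node still references can be dropped.
 */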
void bch2_blacklist_entries_gc(struct work_struct *work)
{
        struct bch_fs *c = container_of(work, struct bch_fs,
                                        journal_seq_blacklist_gc_work);
        struct journal_seq_blacklist_table *t;
        struct bch_sb_field_journal_seq_blacklist *bl;
        struct journal_seq_blacklist_entry *src, *dst;
        struct btree_trans *trans = bch2_trans_get(c);
        unsigned i, nr, new_nr;
        int ret;

        for (i = 0; i < BTREE_ID_NR; i++) {
                struct btree_iter iter;
                struct btree *b;

                bch2_trans_node_iter_init(trans, &iter, i, POS_MIN,
                                          0, 0, BTREE_ITER_PREFETCH);
retry:
                bch2_trans_begin(trans);

                b = bch2_btree_iter_peek_node(&iter);

                while (!(ret = PTR_ERR_OR_ZERO(b)) &&
                       b &&
                       !test_bit(BCH_FS_stopping, &c->flags))
                        b = bch2_btree_iter_next_node(&iter);

                if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
                        goto retry;

                bch2_trans_iter_exit(trans, &iter);
        }

        bch2_trans_put(trans);
        if (ret)
                return;

        mutex_lock(&c->sb_lock);
        bl = bch2_sb_field_get(c->disk_sb.sb, journal_seq_blacklist);
        if (!bl)
                goto out;

        nr = blacklist_nr_entries(bl);
        dst = bl->start;

        t = c->journal_seq_blacklist_table;
        BUG_ON(nr != t->nr);

        for (src = bl->start, i = eytzinger0_first(t->nr);
             src < bl->start + nr;
             src++, i = eytzinger0_next(i, nr)) {
                BUG_ON(t->entries[i].start      != le64_to_cpu(src->start));
                BUG_ON(t->entries[i].end        != le64_to_cpu(src->end));

                if (t->entries[i].dirty)
                        *dst++ = *src;
        }

        new_nr = dst - bl->start;

        bch_info(c, "nr blacklist entries was %u, now %u", nr, new_nr);

        if (new_nr != nr) {
                bl = bch2_sb_field_resize(&c->disk_sb, journal_seq_blacklist,
                                new_nr ? sb_blacklist_u64s(new_nr) : 0);
                BUG_ON(new_nr && !bl);

                if (!new_nr)
                        c->disk_sb.sb->features[0] &= cpu_to_le64(~(1ULL << BCH_FEATURE_journal_seq_blacklist_v3));

                bch2_write_super(c);
        }
out:
        mutex_unlock(&c->sb_lock);
}
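
/*
 * This runs as a workqueue job; a sketch of how it might be queued once
 * recovery no longer needs the full blacklist (the actual call site lives in
 * the recovery code, not in this file):
 *
 *      queue_work(system_long_wq, &c->journal_seq_blacklist_gc_work);
 */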