203ee627cab5d5118b2bef3c17cf684dacebb66e
[linux-block.git] / fs / bcachefs / bcachefs_format.h
1 /* SPDX-License-Identifier: GPL-2.0 */
2 #ifndef _BCACHEFS_FORMAT_H
3 #define _BCACHEFS_FORMAT_H
4
5 /*
6  * bcachefs on disk data structures
7  *
8  * OVERVIEW:
9  *
10  * There are three main types of on disk data structures in bcachefs (this is
11  * reduced from 5 in bcache)
12  *
13  *  - superblock
14  *  - journal
15  *  - btree
16  *
17  * The btree is the primary structure; most metadata exists as keys in the
18  * various btrees. There are only a small number of btrees, they're not
19  * sharded - we have one btree for extents, another for inodes, et cetera.
20  *
21  * SUPERBLOCK:
22  *
23  * The superblock contains the location of the journal, the list of devices in
24  * the filesystem, and in general any metadata we need in order to decide
25  * whether we can start a filesystem or prior to reading the journal/btree
26  * roots.
27  *
28  * The superblock is extensible, and most of the contents of the superblock are
29  * in variable length, type tagged fields; see struct bch_sb_field.
30  *
31  * Backup superblocks do not reside in a fixed location; also, superblocks do
32  * not have a fixed size. To locate backup superblocks we have struct
33  * bch_sb_layout; we store a copy of this inside every superblock, and also
34  * before the first superblock.
35  *
36  * JOURNAL:
37  *
38  * The journal primarily records btree updates in the order they occurred;
39  * journal replay consists of just iterating over all the keys in the open
40  * journal entries and re-inserting them into the btrees.
41  *
42  * The journal also contains entry types for the btree roots, and blacklisted
43  * journal sequence numbers (see journal_seq_blacklist.c).
44  *
45  * BTREE:
46  *
47  * bcachefs btrees are copy on write b+ trees, where nodes are big (typically
48  * 128k-256k) and log structured. We use struct btree_node for writing the first
49  * entry in a given node (offset 0), and struct btree_node_entry for all
50  * subsequent writes.
51  *
52  * After the header, btree node entries contain a list of keys in sorted order.
53  * Values are stored inline with the keys; since values are variable length (and
54  * keys effectively are variable length too, due to packing) we can't do random
55  * access without building up additional in memory tables in the btree node read
56  * path.
57  *
58  * BTREE KEYS (struct bkey):
59  *
60  * The various btrees share a common format for the key - so as to avoid
61  * switching in fastpath lookup/comparison code - but define their own
62  * structures for the key values.
63  *
64  * The size of a key/value pair is stored as a u8 in units of u64s, so the max
65  * size is just under 2k. The common part also contains a type tag for the
66  * value, and a format field indicating whether the key is packed or not (and
67  * also meant to allow adding new key fields in the future, if desired).
68  *
69  * bkeys, when stored within a btree node, may also be packed. In that case, the
70  * bkey_format in that node is used to unpack it. Packed bkeys mean that we can
71  * be generous with field sizes in the common part of the key format (64 bit
72  * inode number, 64 bit offset, 96 bit version field, etc.) for negligible cost.
73  */
74
75 #include <asm/types.h>
76 #include <asm/byteorder.h>
77 #include <linux/kernel.h>
78 #include <linux/uuid.h>
79 #include <uapi/linux/magic.h>
80 #include "vstructs.h"
81
#ifdef __KERNEL__
/* Userspace's uuid.h provides __uuid_t; in the kernel, alias it to uuid_t */
typedef uuid_t __uuid_t;
#endif
85
/*
 * Define accessors for a bitfield spanning bits [offset, end) of a
 * host-endian integer member:
 *   name(k)        - extract the field
 *   SET_name(k, v) - store the field (high bits of v beyond the field width
 *                    are masked off)
 * Also emits name##_OFFSET / name##_BITS constants describing the field.
 */
#define BITMASK(name, type, field, offset, end)				\
static const __maybe_unused unsigned	name##_OFFSET = offset;		\
static const __maybe_unused unsigned	name##_BITS = (end - offset);	\
									\
static inline __u64 name(const type *k)					\
{									\
	return (k->field >> offset) & ~(~0ULL << (end - offset));	\
}									\
									\
static inline void SET_##name(type *k, __u64 v)				\
{									\
	k->field &= ~(~(~0ULL << (end - offset)) << offset);		\
	k->field |= (v & ~(~0ULL << (end - offset))) << offset;		\
}
100
/*
 * Like BITMASK(), but for a little endian on-disk member that is _bits wide:
 * the stored value is converted from LE to CPU order before extraction, and
 * back to LE after update. Additionally emits name##_MAX, the largest value
 * the field can hold.
 */
#define LE_BITMASK(_bits, name, type, field, offset, end)		\
static const __maybe_unused unsigned	name##_OFFSET = offset;		\
static const __maybe_unused unsigned	name##_BITS = (end - offset);	\
static const __maybe_unused __u##_bits	name##_MAX = (1ULL << (end - offset)) - 1;\
									\
static inline __u64 name(const type *k)					\
{									\
	return (__le##_bits##_to_cpu(k->field) >> offset) &		\
		~(~0ULL << (end - offset));				\
}									\
									\
static inline void SET_##name(type *k, __u64 v)				\
{									\
	__u##_bits new = __le##_bits##_to_cpu(k->field);		\
									\
	new &= ~(~(~0ULL << (end - offset)) << offset);			\
	new |= (v & ~(~0ULL << (end - offset))) << offset;		\
	k->field = __cpu_to_le##_bits(new);				\
}

/* Width-specific convenience wrappers: */
#define LE16_BITMASK(n, t, f, o, e)	LE_BITMASK(16, n, t, f, o, e)
#define LE32_BITMASK(n, t, f, o, e)	LE_BITMASK(32, n, t, f, o, e)
#define LE64_BITMASK(n, t, f, o, e)	LE_BITMASK(64, n, t, f, o, e)
124
/*
 * Per-btree-node key packing format: for each bkey field, the number of bits
 * it is packed to and (in field_offset) the bias subtracted before packing.
 */
struct bkey_format {
	__u8		key_u64s;	/* size of a packed key, in u64s */
	__u8		nr_fields;
	/*
	 * One unused slot for now:
	 * NOTE(review): BKEY_NR_FIELDS below is 6, which fills all six slots -
	 * this comment may be stale; confirm.
	 */
	__u8		bits_per_field[6];
	__le64		field_offset[6];
};
132
133 /* Btree keys - all units are in sectors */
134
struct bpos {
	/*
	 * Word order matches machine byte order - btree code treats a bpos as a
	 * single large integer, for search/comparison purposes
	 *
	 * Note that wherever a bpos is embedded in another on disk data
	 * structure, it has to be byte swabbed when reading in metadata that
	 * wasn't written in native endian order:
	 */
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
	__u32		snapshot;
	__u64		offset;
	__u64		inode;
#elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
	__u64		inode;
	__u64		offset;		/* Points to end of extent - sectors */
	__u32		snapshot;
#else
#error edit for your odd byteorder.
#endif
} __packed
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
/* NOTE(review): presumably 4-byte aligned so embedding structs stay densely packed - confirm */
__aligned(4)
#endif
;
160
/* Maximum values of the individual bpos/bkey fields: */
#define KEY_INODE_MAX			((__u64)~0ULL)
#define KEY_OFFSET_MAX			((__u64)~0ULL)
#define KEY_SNAPSHOT_MAX		((__u32)~0U)
#define KEY_SIZE_MAX			((__u32)~0U)
165
166 static inline struct bpos SPOS(__u64 inode, __u64 offset, __u32 snapshot)
167 {
168         return (struct bpos) {
169                 .inode          = inode,
170                 .offset         = offset,
171                 .snapshot       = snapshot,
172         };
173 }
174
/* Common positions; POS() is SPOS() with snapshot == 0: */
#define POS_MIN				SPOS(0, 0, 0)
#define POS_MAX				SPOS(KEY_INODE_MAX, KEY_OFFSET_MAX, 0)
#define SPOS_MAX			SPOS(KEY_INODE_MAX, KEY_OFFSET_MAX, KEY_SNAPSHOT_MAX)
#define POS(_inode, _offset)		SPOS(_inode, _offset, 0)
179
/* Empty placeholder struct, for container_of() */
struct bch_val {
	__u64		__nothing[0];	/* zero size; marks where a value begins */
};
184
/*
 * 96 bit version number; word order matches machine byte order, like
 * struct bpos above.
 */
struct bversion {
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
	__u64		lo;
	__u32		hi;
#elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
	__u32		hi;
	__u64		lo;
#endif
} __packed
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
__aligned(4)
#endif
;
198
199 struct bkey {
200         /* Size of combined key and value, in u64s */
201         __u8            u64s;
202
203         /* Format of key (0 for format local to btree node) */
204 #if defined(__LITTLE_ENDIAN_BITFIELD)
205         __u8            format:7,
206                         needs_whiteout:1;
207 #elif defined (__BIG_ENDIAN_BITFIELD)
208         __u8            needs_whiteout:1,
209                         format:7;
210 #else
211 #error edit for your odd byteorder.
212 #endif
213
214         /* Type of the value */
215         __u8            type;
216
217 #if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
218         __u8            pad[1];
219
220         struct bversion bversion;
221         __u32           size;           /* extent size, in sectors */
222         struct bpos     p;
223 #elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
224         struct bpos     p;
225         __u32           size;           /* extent size, in sectors */
226         struct bversion version;
227
228         __u8            pad[1];
229 #endif
230 } __packed
231 #if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
232 /*
233  * The big-endian version of bkey can't be compiled by rustc with the "aligned"
234  * attr since it doesn't allow types to have both "packed" and "aligned" attrs.
235  * So for Rust compatibility, don't include this. It can be included in the LE
236  * version because the "packed" attr is redundant in that case.
237  *
238  * History: (quoting Kent)
239  *
240  * Specifically, when i was designing bkey, I wanted the header to be no
241  * bigger than necessary so that bkey_packed could use the rest. That means that
242  * decently offten extent keys will fit into only 8 bytes, instead of spilling over
243  * to 16.
244  *
245  * But packed_bkey treats the part after the header - the packed section -
246  * as a single multi word, variable length integer. And bkey, the unpacked
247  * version, is just a special case version of a bkey_packed; all the packed
248  * bkey code will work on keys in any packed format, the in-memory
249  * representation of an unpacked key also is just one type of packed key...
250  *
251  * So that constrains the key part of a bkig endian bkey to start right
252  * after the header.
253  *
254  * If we ever do a bkey_v2 and need to expand the hedaer by another byte for
255  * some reason - that will clean up this wart.
256  */
257 __aligned(8)
258 #endif
259 ;
260
struct bkey_packed {
	__u64		_data[0];	/* for accessing the key as raw u64s */

	/* Size of combined key and value, in u64s */
	__u8		u64s;

	/* Format of key (0 for format local to btree node) */

	/*
	 * XXX: next incompat on disk format change, switch format and
	 * needs_whiteout - bkey_packed() will be cheaper if format is the high
	 * bits of the bitfield
	 */
#if defined(__LITTLE_ENDIAN_BITFIELD)
	__u8		format:7,
			needs_whiteout:1;
#elif defined (__BIG_ENDIAN_BITFIELD)
	__u8		needs_whiteout:1,
			format:7;
#endif

	/* Type of the value */
	__u8		type;
	__u8		key_start[0];	/* marks where the packed key data begins */

	/*
	 * We copy bkeys with struct assignment in various places, and while
	 * that shouldn't be done with packed bkeys we can't disallow it in C,
	 * and it's legal to cast a bkey to a bkey_packed  - so padding it out
	 * to the same size as struct bkey should hopefully be safest.
	 */
	__u8		pad[sizeof(struct bkey) - 3];
} __packed __aligned(8);
294
/* 128 bit little endian integer, stored as two 64 bit halves: */
typedef struct {
	__le64			lo;
	__le64			hi;
} bch_le128;
299
/* Size of struct bkey in u64s, and the max key and value sizes in u64s: */
#define BKEY_U64s			(sizeof(struct bkey) / sizeof(__u64))
#define BKEY_U64s_MAX			U8_MAX
#define BKEY_VAL_U64s_MAX		(BKEY_U64s_MAX - BKEY_U64s)

/* Bit offset where packed key data begins (after the u64s/format/type bytes) */
#define KEY_PACKED_BITS_START		24

/* Values of the bkey format field: */
#define KEY_FORMAT_LOCAL_BTREE		0
#define KEY_FORMAT_CURRENT		1
308
/* Indices into the bits_per_field/field_offset arrays of struct bkey_format: */
enum bch_bkey_fields {
	BKEY_FIELD_INODE,
	BKEY_FIELD_OFFSET,
	BKEY_FIELD_SNAPSHOT,
	BKEY_FIELD_SIZE,
	BKEY_FIELD_VERSION_HI,
	BKEY_FIELD_VERSION_LO,
	BKEY_NR_FIELDS,
};
318
/* Number of bits the given struct bkey member occupies in memory: */
#define bkey_format_field(name, field)					\
	[BKEY_FIELD_##name] = (sizeof(((struct bkey *) NULL)->field) * 8)

/*
 * The identity format: every field packed to its full in-memory width, with
 * all field offsets zero (unnamed members of the designated initializer).
 */
#define BKEY_FORMAT_CURRENT						\
((struct bkey_format) {							\
	.key_u64s	= BKEY_U64s,					\
	.nr_fields	= BKEY_NR_FIELDS,				\
	.bits_per_field = {						\
		bkey_format_field(INODE,	p.inode),		\
		bkey_format_field(OFFSET,	p.offset),		\
		bkey_format_field(SNAPSHOT,	p.snapshot),		\
		bkey_format_field(SIZE,		size),			\
		bkey_format_field(VERSION_HI,	bversion.hi),		\
		bkey_format_field(VERSION_LO,	bversion.lo),		\
	},								\
})
335
/* bkey with inline value */
struct bkey_i {
	__u64			_data[0];	/* key + value as raw u64s */

	struct bkey	k;
	struct bch_val	v;
};
343
/*
 * Initializer for a bkey at position _pos with no value; type is left 0
 * (KEY_TYPE_deleted):
 */
#define POS_KEY(_pos)							\
((struct bkey) {							\
	.u64s		= BKEY_U64s,					\
	.format		= KEY_FORMAT_CURRENT,				\
	.p		= _pos,						\
})
350
/* Initializer for an unpacked bkey with the given position and size: */
#define KEY(_inode, _offset, _size)					\
((struct bkey) {							\
	.u64s		= BKEY_U64s,					\
	.format		= KEY_FORMAT_CURRENT,				\
	.p		= POS(_inode, _offset),				\
	.size		= _size,					\
})
358
359 static inline void bkey_init(struct bkey *k)
360 {
361         *k = KEY(0, 0, 0);
362 }
363
/* Total size of a key + value, in bytes: */
#define bkey_bytes(_k)		((_k)->u64s * sizeof(__u64))

/* Declare a bkey_i followed by 'pad' extra u64s of space for the value: */
#define __BKEY_PADDED(key, pad)					\
	struct bkey_i key; __u64 key ## _pad[pad]
368
369 /*
370  * - DELETED keys are used internally to mark keys that should be ignored but
371  *   override keys in composition order.  Their version number is ignored.
372  *
373  * - DISCARDED keys indicate that the data is all 0s because it has been
374  *   discarded. DISCARDs may have a version; if the version is nonzero the key
375  *   will be persistent, otherwise the key will be dropped whenever the btree
376  *   node is rewritten (like DELETED keys).
377  *
378  * - ERROR: any read of the data returns a read error, as the data was lost due
379  *   to a failing device. Like DISCARDED keys, they can be removed (overridden)
380  *   by new writes or cluster-wide GC. Node repair can also overwrite them with
381  *   the same or a more recent version number, but not with an older version
382  *   number.
383  *
384  * - WHITEOUT: for hash table btrees
385  */
/*
 * x-macro list of value types: x(name, type tag). The tags are stored on disk
 * in bkey.type, so existing entries must never be renumbered; only append.
 */
#define BCH_BKEY_TYPES()				\
	x(deleted,		0)			\
	x(whiteout,		1)			\
	x(error,		2)			\
	x(cookie,		3)			\
	x(hash_whiteout,	4)			\
	x(btree_ptr,		5)			\
	x(extent,		6)			\
	x(reservation,		7)			\
	x(inode,		8)			\
	x(inode_generation,	9)			\
	x(dirent,		10)			\
	x(xattr,		11)			\
	x(alloc,		12)			\
	x(quota,		13)			\
	x(stripe,		14)			\
	x(reflink_p,		15)			\
	x(reflink_v,		16)			\
	x(inline_data,		17)			\
	x(btree_ptr_v2,		18)			\
	x(indirect_inline_data,	19)			\
	x(alloc_v2,		20)			\
	x(subvolume,		21)			\
	x(snapshot,		22)			\
	x(inode_v2,		23)			\
	x(alloc_v3,		24)			\
	x(set,			25)			\
	x(lru,			26)			\
	x(alloc_v4,		27)			\
	x(backpointer,		28)			\
	x(inode_v3,		29)			\
	x(bucket_gens,		30)			\
	x(snapshot_tree,	31)			\
	x(logged_op_truncate,	32)			\
	x(logged_op_finsert,	33)			\
	x(accounting,		34)
422
/* Value type tags, as stored in bkey.type: */
enum bch_bkey_type {
#define x(name, nr) KEY_TYPE_##name	= nr,
	BCH_BKEY_TYPES()
#undef x
	KEY_TYPE_MAX,
};
429
/*
 * Value bodies for the simple key types; most are empty - the key itself
 * carries all the information.
 */

struct bch_deleted {
	struct bch_val		v;
};

struct bch_whiteout {
	struct bch_val		v;
};

struct bch_error {
	struct bch_val		v;
};

/* A single opaque 64 bit value: */
struct bch_cookie {
	struct bch_val		v;
	__le64			cookie;
};

struct bch_hash_whiteout {
	struct bch_val		v;
};

struct bch_set {
	struct bch_val		v;
};
454
/* 128 bits, sufficient for cryptographic MACs: */
struct bch_csum {
	__le64			lo;	/* low 64 bits */
	__le64			hi;	/* high 64 bits */
} __packed __aligned(8);
460
/*
 * Value of a KEY_TYPE_backpointer key.
 * NOTE(review): semantics inferred from field names - appears to record which
 * btree/level/position references a given range of a bucket; confirm against
 * the backpointers code.
 */
struct bch_backpointer {
	struct bch_val		v;
	__u8			btree_id;
	__u8			level;
	__u8			data_type;
	__u64			bucket_offset:40;	/* 40 bit bitfield */
	__u32			bucket_len;
	struct bpos		pos;
} __packed __aligned(8);
470
471 /* Optional/variable size superblock sections: */
472
/* Common header of every variable length, type tagged superblock section: */
struct bch_sb_field {
	__u64			_data[0];	/* for accessing the field as raw u64s */
	__le32			u64s;		/* size of this field, in u64s */
	__le32			type;		/* enum bch_sb_field_type */
};
478
/*
 * x-macro list of superblock field types: x(name, on-disk type tag).
 * Tags are part of the on-disk format; only append new entries.
 */
#define BCH_SB_FIELDS()				\
	x(journal,			0)	\
	x(members_v1,			1)	\
	x(crypt,			2)	\
	x(replicas_v0,			3)	\
	x(quota,			4)	\
	x(disk_groups,			5)	\
	x(clean,			6)	\
	x(replicas,			7)	\
	x(journal_seq_blacklist,	8)	\
	x(journal_v2,			9)	\
	x(counters,			10)	\
	x(members_v2,			11)	\
	x(errors,			12)	\
	x(ext,				13)	\
	x(downgrade,			14)
495
#include "alloc_background_format.h"
#include "dirent_format.h"
#include "disk_accounting_format.h"
#include "disk_groups_format.h"
#include "extents_format.h"
#include "ec_format.h"
#include "inode_format.h"
#include "journal_seq_blacklist_format.h"
#include "logged_ops_format.h"
#include "lru_format.h"
#include "quota_format.h"
#include "reflink_format.h"
#include "replicas_format.h"
#include "snapshot_format.h"
#include "subvolume_format.h"
#include "sb-counters_format.h"
#include "sb-downgrade_format.h"
#include "sb-errors_format.h"
#include "sb-members_format.h"
#include "xattr_format.h"
518
/* Type tags of the optional superblock sections, as stored in bch_sb_field.type: */
enum bch_sb_field_type {
#define x(f, nr)	BCH_SB_FIELD_##f = nr,
	BCH_SB_FIELDS()
#undef x
	BCH_SB_FIELD_NR
};
525
/*
 * Most superblock fields are replicated in all devices' superblocks - a few
 * are not:
 */
/* Bitmask of field types stored per-device rather than replicated: */
#define BCH_SINGLE_DEVICE_SB_FIELDS		\
	((1U << BCH_SB_FIELD_journal)|		\
	 (1U << BCH_SB_FIELD_journal_v2))
533
534 /* BCH_SB_FIELD_journal: */
535
struct bch_sb_field_journal {
	struct bch_sb_field	field;
	__le64			buckets[];	/* buckets holding this device's journal */
};
540
/* Journal buckets as (start, nr) runs instead of individual bucket numbers: */
struct bch_sb_field_journal_v2 {
	struct bch_sb_field	field;

	struct bch_sb_field_journal_v2_entry {
		__le64		start;
		__le64		nr;
	}			d[];
};
549
550 /* BCH_SB_FIELD_crypt: */
551
/* 128 bit nonce: */
struct nonce {
	__le32			d[4];
};

/* 256 bit encryption key: */
struct bch_key {
	__le64			key[4];
};

/* The bytes "bch**key", little endian: */
#define BCH_KEY_MAGIC					\
	(((__u64) 'b' <<  0)|((__u64) 'c' <<  8)|		\
	 ((__u64) 'h' << 16)|((__u64) '*' << 24)|		\
	 ((__u64) '*' << 32)|((__u64) 'k' << 40)|		\
	 ((__u64) 'e' << 48)|((__u64) 'y' << 56))

/*
 * A bch_key prefixed with a known magic - presumably so that successful
 * decryption can be verified (NOTE(review): confirm against checksum.c).
 */
struct bch_encrypted_key {
	__le64			magic;
	struct bch_key		key;
};
570
/*
 * If this field is present in the superblock, it stores an encryption key which
 * is used to encrypt all other data/metadata. The key will normally be encrypted
 * with the key userspace provides, but if encryption has been turned off we'll
 * just store the master key unencrypted in the superblock so we can access the
 * previously encrypted data.
 */
struct bch_sb_field_crypt {
	struct bch_sb_field	field;

	__le64			flags;		/* BCH_CRYPT_KDF_TYPE */
	__le64			kdf_flags;	/* BCH_KDF_SCRYPT_N/R/P below */
	struct bch_encrypted_key key;
};
585
LE64_BITMASK(BCH_CRYPT_KDF_TYPE,	struct bch_sb_field_crypt, flags, 0, 4);

/* Key derivation functions (values of BCH_CRYPT_KDF_TYPE): */
enum bch_kdf_types {
	BCH_KDF_SCRYPT		= 0,
	BCH_KDF_NR		= 1,
};

/* stored as base 2 log of scrypt params: */
LE64_BITMASK(BCH_KDF_SCRYPT_N,	struct bch_sb_field_crypt, kdf_flags,  0, 16);
LE64_BITMASK(BCH_KDF_SCRYPT_R,	struct bch_sb_field_crypt, kdf_flags, 16, 32);
LE64_BITMASK(BCH_KDF_SCRYPT_P,	struct bch_sb_field_crypt, kdf_flags, 32, 48);
597
598 /*
599  * On clean shutdown, store btree roots and current journal sequence number in
600  * the superblock:
601  */
struct jset_entry {
	__le16			u64s;	/* size of the entry payload, in u64s */
	__u8			btree_id;
	__u8			level;
	__u8			type; /* designates what this jset holds */
	__u8			pad[3];

	struct bkey_i		start[0];	/* payload viewed as keys */
	__u64			_data[];	/* payload viewed as raw u64s */
};
612
/* Written on clean shutdown; see the comment above struct jset_entry. */
struct bch_sb_field_clean {
	struct bch_sb_field	field;

	__le32			flags;
	__le16			_read_clock; /* no longer used */
	__le16			_write_clock;
	__le64			journal_seq;

	struct jset_entry	start[0];	/* jset entries (btree roots, etc.) */
	__u64			_data[];
};
624
/*
 * Extra superblock state.
 * NOTE(review): field semantics inferred from names (recovery/error
 * bookkeeping) - confirm against sb/recovery code.
 */
struct bch_sb_field_ext {
	struct bch_sb_field	field;
	__le64			recovery_passes_required[2];
	__le64			errors_silent[8];
	__le64			btrees_lost_data;
};
631
632 /* Superblock: */
633
634 /*
635  * New versioning scheme:
636  * One common version number for all on disk data structures - superblock, btree
637  * nodes, journal entries
638  */
/* A version is a 16 bit number: 6 bit major part, 10 bit minor part. */
#define BCH_VERSION_MAJOR(_v)		((__u16) ((_v) >> 10))
#define BCH_VERSION_MINOR(_v)		((__u16) ((_v) & ~(~0U << 10)))
#define BCH_VERSION(_major, _minor)	(((_major) << 10)|(_minor) << 0)
642
/*
 * field 1:		version name
 * field 2:		BCH_VERSION(major, minor)
 * field 3:		recovery passes required on upgrade
 */
/* x-macro of metadata versions; version numbers only ever grow - append only. */
#define BCH_METADATA_VERSIONS()						\
	x(bkey_renumber,		BCH_VERSION(0, 10))		\
	x(inode_btree_change,		BCH_VERSION(0, 11))		\
	x(snapshot,			BCH_VERSION(0, 12))		\
	x(inode_backpointers,		BCH_VERSION(0, 13))		\
	x(btree_ptr_sectors_written,	BCH_VERSION(0, 14))		\
	x(snapshot_2,			BCH_VERSION(0, 15))		\
	x(reflink_p_fix,		BCH_VERSION(0, 16))		\
	x(subvol_dirent,		BCH_VERSION(0, 17))		\
	x(inode_v2,			BCH_VERSION(0, 18))		\
	x(freespace,			BCH_VERSION(0, 19))		\
	x(alloc_v4,			BCH_VERSION(0, 20))		\
	x(new_data_types,		BCH_VERSION(0, 21))		\
	x(backpointers,			BCH_VERSION(0, 22))		\
	x(inode_v3,			BCH_VERSION(0, 23))		\
	x(unwritten_extents,		BCH_VERSION(0, 24))		\
	x(bucket_gens,			BCH_VERSION(0, 25))		\
	x(lru_v2,			BCH_VERSION(0, 26))		\
	x(fragmentation_lru,		BCH_VERSION(0, 27))		\
	x(no_bps_in_alloc_keys,		BCH_VERSION(0, 28))		\
	x(snapshot_trees,		BCH_VERSION(0, 29))		\
	x(major_minor,			BCH_VERSION(1,  0))		\
	x(snapshot_skiplists,		BCH_VERSION(1,  1))		\
	x(deleted_inodes,		BCH_VERSION(1,  2))		\
	x(rebalance_work,		BCH_VERSION(1,  3))		\
	x(member_seq,			BCH_VERSION(1,  4))		\
	x(subvolume_fs_parent,		BCH_VERSION(1,  5))		\
	x(btree_subvolume_children,	BCH_VERSION(1,  6))		\
	x(mi_btree_bitmap,		BCH_VERSION(1,  7))		\
	x(bucket_stripe_sectors,	BCH_VERSION(1,  8))		\
	x(disk_accounting_v2,		BCH_VERSION(1,  9))		\
	x(disk_accounting_v3,		BCH_VERSION(1, 10))		\
	x(disk_accounting_inum,		BCH_VERSION(1, 11))		\
	x(rebalance_work_acct_fix,	BCH_VERSION(1, 12))
682
enum bcachefs_metadata_version {
	bcachefs_metadata_version_min = 9,	/* oldest version still supported */
#define x(t, n) bcachefs_metadata_version_##t = n,
	BCH_METADATA_VERSIONS()
#undef x
	bcachefs_metadata_version_max	/* one past the newest defined version */
};
690
/* Filesystems older than this version must be upgraded: */
static const __maybe_unused
unsigned bcachefs_metadata_required_upgrade_below = bcachefs_metadata_version_rebalance_work;

/* The newest defined metadata version: */
#define bcachefs_metadata_version_current	(bcachefs_metadata_version_max - 1)

/* Location of the primary superblock, in 512 byte sectors: */
#define BCH_SB_SECTOR			8

#define BCH_SB_LAYOUT_SIZE_BITS_MAX	16 /* 32 MB */
699
/*
 * Locations of all superblocks on a device; a copy is stored inside every
 * superblock and also before the first superblock (see the overview comment
 * at the top of this file).
 */
struct bch_sb_layout {
	__uuid_t		magic;	/* bcachefs superblock UUID */
	__u8			layout_type;
	__u8			sb_max_size_bits; /* base 2 of 512 byte sectors */
	__u8			nr_superblocks;
	__u8			pad[5];
	__le64			sb_offset[61];	/* superblock locations, in sectors */
} __packed __aligned(8);

#define BCH_SB_LAYOUT_SECTOR	7
710
/*
 * @offset	- sector where this sb was written
 * @version	- on disk format version
 * @version_min	- Oldest metadata version this filesystem contains; so we can
 *		  safely drop compatibility code and refuse to mount filesystems
 *		  we'd need it for
 * @magic	- identifies as a bcachefs superblock (BCHFS_MAGIC)
 * @seq		- identifies most recent superblock, incremented each time
 *		  superblock is written
 * @uuid	- used for generating various magic numbers and identifying
 *		  member devices, never changes
 * @user_uuid	- user visible UUID, may be changed
 * @label	- filesystem label
 * @features	- enabled incompatible features
 */
/* See the comment block above for the main field documentation. */
struct bch_sb {
	struct bch_csum		csum;
	__le16			version;	/* enum bcachefs_metadata_version */
	__le16			version_min;
	__le16			pad[2];
	__uuid_t		magic;
	__uuid_t		uuid;
	__uuid_t		user_uuid;
	__u8			label[BCH_SB_LABEL_SIZE];
	__le64			offset;
	__le64			seq;

	__le16			block_size;
	__u8			dev_idx;
	__u8			nr_devices;
	__le32			u64s;		/* size of variable length portion, in u64s */

	__le64			time_base_lo;
	__le32			time_base_hi;
	__le32			time_precision;

	__le64			flags[7];	/* see the LE64_BITMASKs below */
	__le64			write_time;
	__le64			features[2];
	__le64			compat[2];

	struct bch_sb_layout	layout;

	struct bch_sb_field	start[0];	/* variable length, type tagged fields */
	__u64			_data[];
} __packed __aligned(8);
758
/*
 * Flags:
 * BCH_SB_INITIALIZED	- set on first mount
 * BCH_SB_CLEAN		- did we shut down cleanly? Just a hint, doesn't affect
 *			  behaviour of mount/recovery path:
 * BCH_SB_INODE_32BIT	- limit inode numbers to 32 bits
 * BCH_SB_128_BIT_MACS	- 128 bit macs instead of 80
 * BCH_SB_ENCRYPTION_TYPE - if nonzero encryption is enabled; overrides
 *			   DATA/META_CSUM_TYPE. Also indicates encryption
 *			   algorithm in use, if/when we get more than one
 */
770
/*
 * Superblock option/flag bitfields, packed into bch_sb->flags[]; accessor
 * functions are generated by the LE16_BITMASK()/LE64_BITMASK() macros.
 * Bit ranges are on-disk format — never renumber existing fields.
 */
LE16_BITMASK(BCH_SB_BLOCK_SIZE,		struct bch_sb, block_size, 0, 16);

LE64_BITMASK(BCH_SB_INITIALIZED,	struct bch_sb, flags[0],  0,  1);
LE64_BITMASK(BCH_SB_CLEAN,		struct bch_sb, flags[0],  1,  2);
LE64_BITMASK(BCH_SB_CSUM_TYPE,		struct bch_sb, flags[0],  2,  8);
LE64_BITMASK(BCH_SB_ERROR_ACTION,	struct bch_sb, flags[0],  8, 12);

LE64_BITMASK(BCH_SB_BTREE_NODE_SIZE,	struct bch_sb, flags[0], 12, 28);

LE64_BITMASK(BCH_SB_GC_RESERVE,		struct bch_sb, flags[0], 28, 33);
LE64_BITMASK(BCH_SB_ROOT_RESERVE,	struct bch_sb, flags[0], 33, 40);

LE64_BITMASK(BCH_SB_META_CSUM_TYPE,	struct bch_sb, flags[0], 40, 44);
LE64_BITMASK(BCH_SB_DATA_CSUM_TYPE,	struct bch_sb, flags[0], 44, 48);

LE64_BITMASK(BCH_SB_META_REPLICAS_WANT,	struct bch_sb, flags[0], 48, 52);
LE64_BITMASK(BCH_SB_DATA_REPLICAS_WANT,	struct bch_sb, flags[0], 52, 56);

LE64_BITMASK(BCH_SB_POSIX_ACL,		struct bch_sb, flags[0], 56, 57);
LE64_BITMASK(BCH_SB_USRQUOTA,		struct bch_sb, flags[0], 57, 58);
LE64_BITMASK(BCH_SB_GRPQUOTA,		struct bch_sb, flags[0], 58, 59);
LE64_BITMASK(BCH_SB_PRJQUOTA,		struct bch_sb, flags[0], 59, 60);

LE64_BITMASK(BCH_SB_HAS_ERRORS,		struct bch_sb, flags[0], 60, 61);
LE64_BITMASK(BCH_SB_HAS_TOPOLOGY_ERRORS,struct bch_sb, flags[0], 61, 62);

LE64_BITMASK(BCH_SB_BIG_ENDIAN,		struct bch_sb, flags[0], 62, 63);
LE64_BITMASK(BCH_SB_PROMOTE_WHOLE_EXTENTS,
					struct bch_sb, flags[0], 63, 64);

LE64_BITMASK(BCH_SB_STR_HASH_TYPE,	struct bch_sb, flags[1],  0,  4);
/* compression type is split: low 4 bits here, high 4 bits in flags[4] */
LE64_BITMASK(BCH_SB_COMPRESSION_TYPE_LO,struct bch_sb, flags[1],  4,  8);
LE64_BITMASK(BCH_SB_INODE_32BIT,	struct bch_sb, flags[1],  8,  9);

LE64_BITMASK(BCH_SB_128_BIT_MACS,	struct bch_sb, flags[1],  9, 10);
LE64_BITMASK(BCH_SB_ENCRYPTION_TYPE,	struct bch_sb, flags[1], 10, 14);

/*
 * Max size of an extent that may require bouncing to read or write
 * (checksummed, compressed): 64k
 */
LE64_BITMASK(BCH_SB_ENCODED_EXTENT_MAX_BITS,
					struct bch_sb, flags[1], 14, 20);

LE64_BITMASK(BCH_SB_META_REPLICAS_REQ,	struct bch_sb, flags[1], 20, 24);
LE64_BITMASK(BCH_SB_DATA_REPLICAS_REQ,	struct bch_sb, flags[1], 24, 28);

LE64_BITMASK(BCH_SB_PROMOTE_TARGET,	struct bch_sb, flags[1], 28, 40);
LE64_BITMASK(BCH_SB_FOREGROUND_TARGET,	struct bch_sb, flags[1], 40, 52);
LE64_BITMASK(BCH_SB_BACKGROUND_TARGET,	struct bch_sb, flags[1], 52, 64);

LE64_BITMASK(BCH_SB_BACKGROUND_COMPRESSION_TYPE_LO,
					struct bch_sb, flags[2],  0,  4);
LE64_BITMASK(BCH_SB_GC_RESERVE_BYTES,	struct bch_sb, flags[2],  4, 64);

LE64_BITMASK(BCH_SB_ERASURE_CODE,	struct bch_sb, flags[3],  0, 16);
LE64_BITMASK(BCH_SB_METADATA_TARGET,	struct bch_sb, flags[3], 16, 28);
LE64_BITMASK(BCH_SB_SHARD_INUMS,	struct bch_sb, flags[3], 28, 29);
LE64_BITMASK(BCH_SB_INODES_USE_KEY_CACHE,struct bch_sb, flags[3], 29, 30);
LE64_BITMASK(BCH_SB_JOURNAL_FLUSH_DELAY,struct bch_sb, flags[3], 30, 62);
LE64_BITMASK(BCH_SB_JOURNAL_FLUSH_DISABLED,struct bch_sb, flags[3], 62, 63);
LE64_BITMASK(BCH_SB_JOURNAL_RECLAIM_DELAY,struct bch_sb, flags[4], 0, 32);
LE64_BITMASK(BCH_SB_JOURNAL_TRANSACTION_NAMES,struct bch_sb, flags[4], 32, 33);
LE64_BITMASK(BCH_SB_NOCOW,		struct bch_sb, flags[4], 33, 34);
LE64_BITMASK(BCH_SB_WRITE_BUFFER_SIZE,	struct bch_sb, flags[4], 34, 54);
LE64_BITMASK(BCH_SB_VERSION_UPGRADE,	struct bch_sb, flags[4], 54, 56);

LE64_BITMASK(BCH_SB_COMPRESSION_TYPE_HI,struct bch_sb, flags[4], 56, 60);
LE64_BITMASK(BCH_SB_BACKGROUND_COMPRESSION_TYPE_HI,
					struct bch_sb, flags[4], 60, 64);

LE64_BITMASK(BCH_SB_VERSION_UPGRADE_COMPLETE,
					struct bch_sb, flags[5],  0, 16);
LE64_BITMASK(BCH_SB_ALLOCATOR_STUCK_TIMEOUT,
					struct bch_sb, flags[5], 16, 32);
847 static inline __u64 BCH_SB_COMPRESSION_TYPE(const struct bch_sb *sb)
848 {
849         return BCH_SB_COMPRESSION_TYPE_LO(sb) | (BCH_SB_COMPRESSION_TYPE_HI(sb) << 4);
850 }
851
852 static inline void SET_BCH_SB_COMPRESSION_TYPE(struct bch_sb *sb, __u64 v)
853 {
854         SET_BCH_SB_COMPRESSION_TYPE_LO(sb, v);
855         SET_BCH_SB_COMPRESSION_TYPE_HI(sb, v >> 4);
856 }
857
858 static inline __u64 BCH_SB_BACKGROUND_COMPRESSION_TYPE(const struct bch_sb *sb)
859 {
860         return BCH_SB_BACKGROUND_COMPRESSION_TYPE_LO(sb) |
861                 (BCH_SB_BACKGROUND_COMPRESSION_TYPE_HI(sb) << 4);
862 }
863
864 static inline void SET_BCH_SB_BACKGROUND_COMPRESSION_TYPE(struct bch_sb *sb, __u64 v)
865 {
866         SET_BCH_SB_BACKGROUND_COMPRESSION_TYPE_LO(sb, v);
867         SET_BCH_SB_BACKGROUND_COMPRESSION_TYPE_HI(sb, v >> 4);
868 }
869
/*
 * Features:
 *
 * journal_seq_blacklist_v3:	gates BCH_SB_FIELD_journal_seq_blacklist
 * reflink:			gates KEY_TYPE_reflink
 * inline_data:			gates KEY_TYPE_inline_data
 * new_siphash:			gates BCH_STR_HASH_siphash
 * new_extent_overwrite:	gates BTREE_NODE_NEW_EXTENT_OVERWRITE
 */
#define BCH_SB_FEATURES()			\
	x(lz4,				0)	\
	x(gzip,				1)	\
	x(zstd,				2)	\
	x(atomic_nlink,			3)	\
	x(ec,				4)	\
	x(journal_seq_blacklist_v3,	5)	\
	x(reflink,			6)	\
	x(new_siphash,			7)	\
	x(inline_data,			8)	\
	x(new_extent_overwrite,		9)	\
	x(incompressible,		10)	\
	x(btree_ptr_v2,			11)	\
	x(extents_above_btree_updates,	12)	\
	x(btree_updates_journalled,	13)	\
	x(reflink_inline_data,		14)	\
	x(new_varint,			15)	\
	x(journal_no_flush,		16)	\
	x(alloc_v2,			17)	\
	x(extents_across_btree_nodes,	18)

/* NOTE(review): presumably feature bits set unconditionally on rw mount — confirm at callers */
#define BCH_SB_FEATURES_ALWAYS				\
	((1ULL << BCH_FEATURE_new_extent_overwrite)|	\
	 (1ULL << BCH_FEATURE_extents_above_btree_updates)|\
	 (1ULL << BCH_FEATURE_btree_updates_journalled)|\
	 (1ULL << BCH_FEATURE_alloc_v2)|\
	 (1ULL << BCH_FEATURE_extents_across_btree_nodes))

/* NOTE(review): presumably the full feature set written by this implementation — confirm */
#define BCH_SB_FEATURES_ALL                             \
	(BCH_SB_FEATURES_ALWAYS|			\
	 (1ULL << BCH_FEATURE_new_siphash)|		\
	 (1ULL << BCH_FEATURE_btree_ptr_v2)|		\
	 (1ULL << BCH_FEATURE_new_varint)|		\
	 (1ULL << BCH_FEATURE_journal_no_flush))

/* BCH_FEATURE_* bit numbers for bch_sb->features[]: */
enum bch_sb_feature {
#define x(f, n) BCH_FEATURE_##f,
	BCH_SB_FEATURES()
#undef x
	BCH_FEATURE_NR,
};
920
/* Backwards-compatible feature bits, for bch_sb->compat[]: */
#define BCH_SB_COMPAT()					\
	x(alloc_info,				0)	\
	x(alloc_metadata,			1)	\
	x(extents_above_btree_updates_done,	2)	\
	x(bformat_overflow_done,		3)

enum bch_sb_compat {
#define x(f, n) BCH_COMPAT_##f,
	BCH_SB_COMPAT()
#undef x
	BCH_COMPAT_NR,
};
933
/* options: */

/* Values for BCH_SB_VERSION_UPGRADE (2 bits in flags[4]): */
#define BCH_VERSION_UPGRADE_OPTS()	\
	x(compatible,		0)	\
	x(incompatible,		1)	\
	x(none,			2)

enum bch_version_upgrade_opts {
#define x(t, n) BCH_VERSION_UPGRADE_##t = n,
	BCH_VERSION_UPGRADE_OPTS()
#undef x
};

#define BCH_REPLICAS_MAX		4U

#define BCH_BKEY_PTRS_MAX		16U
950
/* Action taken on filesystem error; see BCH_SB_ERROR_ACTION: */
#define BCH_ERROR_ACTIONS()		\
	x(continue,		0)	\
	x(fix_safe,		1)	\
	x(panic,		2)	\
	x(ro,			3)

enum bch_error_actions {
#define x(t, n) BCH_ON_ERROR_##t = n,
	BCH_ERROR_ACTIONS()
#undef x
	BCH_ON_ERROR_NR
};

/* On-disk string hash types; see BCH_SB_STR_HASH_TYPE: */
#define BCH_STR_HASH_TYPES()		\
	x(crc32c,		0)	\
	x(crc64,		1)	\
	x(siphash_old,		2)	\
	x(siphash,		3)

enum bch_str_hash_type {
#define x(t, n) BCH_STR_HASH_##t = n,
	BCH_STR_HASH_TYPES()
#undef x
	BCH_STR_HASH_NR
};

/* User-selectable hash options (note: numbering differs from on-disk types): */
#define BCH_STR_HASH_OPTS()		\
	x(crc32c,		0)	\
	x(crc64,		1)	\
	x(siphash,		2)

enum bch_str_hash_opts {
#define x(t, n) BCH_STR_HASH_OPT_##t = n,
	BCH_STR_HASH_OPTS()
#undef x
	BCH_STR_HASH_OPT_NR
};
988
/* On-disk checksum types; see BCH_SB_META_CSUM_TYPE/BCH_SB_DATA_CSUM_TYPE: */
#define BCH_CSUM_TYPES()			\
	x(none,				0)	\
	x(crc32c_nonzero,		1)	\
	x(crc64_nonzero,		2)	\
	x(chacha20_poly1305_80,		3)	\
	x(chacha20_poly1305_128,	4)	\
	x(crc32c,			5)	\
	x(crc64,			6)	\
	x(xxhash,			7)

enum bch_csum_type {
#define x(t, n) BCH_CSUM_##t = n,
	BCH_CSUM_TYPES()
#undef x
	BCH_CSUM_NR
};

/* Number of checksum bytes stored for each checksum type: */
static const __maybe_unused unsigned bch_crc_bytes[] = {
	[BCH_CSUM_none]				= 0,
	[BCH_CSUM_crc32c_nonzero]		= 4,
	[BCH_CSUM_crc32c]			= 4,
	[BCH_CSUM_crc64_nonzero]		= 8,
	[BCH_CSUM_crc64]			= 8,
	[BCH_CSUM_xxhash]			= 8,
	[BCH_CSUM_chacha20_poly1305_80]		= 10,
	[BCH_CSUM_chacha20_poly1305_128]	= 16,
};
1016
1017 static inline _Bool bch2_csum_type_is_encryption(enum bch_csum_type type)
1018 {
1019         switch (type) {
1020         case BCH_CSUM_chacha20_poly1305_80:
1021         case BCH_CSUM_chacha20_poly1305_128:
1022                 return true;
1023         default:
1024                 return false;
1025         }
1026 }
1027
/* User-selectable checksum options (numbering differs from on-disk types): */
#define BCH_CSUM_OPTS()			\
	x(none,			0)	\
	x(crc32c,		1)	\
	x(crc64,		2)	\
	x(xxhash,		3)

enum bch_csum_opts {
#define x(t, n) BCH_CSUM_OPT_##t = n,
	BCH_CSUM_OPTS()
#undef x
	BCH_CSUM_OPT_NR
};
1040
/* On-disk compression types (stored per-extent): */
#define BCH_COMPRESSION_TYPES()		\
	x(none,			0)	\
	x(lz4_old,		1)	\
	x(gzip,			2)	\
	x(lz4,			3)	\
	x(zstd,			4)	\
	x(incompressible,	5)

enum bch_compression_type {
#define x(t, n) BCH_COMPRESSION_TYPE_##t = n,
	BCH_COMPRESSION_TYPES()
#undef x
	BCH_COMPRESSION_TYPE_NR
};

/* User-selectable compression options (numbering differs from on-disk types): */
#define BCH_COMPRESSION_OPTS()		\
	x(none,		0)		\
	x(lz4,		1)		\
	x(gzip,		2)		\
	x(zstd,		3)

enum bch_compression_opts {
#define x(t, n) BCH_COMPRESSION_OPT_##t = n,
	BCH_COMPRESSION_OPTS()
#undef x
	BCH_COMPRESSION_OPT_NR
};
1068
/*
 * Magic numbers
 *
 * The various other data structures have their own magic numbers, which are
 * xored with the first part of the cache set's UUID
 */

/* original bcache magic; shares its first 32 bits with BCHFS_MAGIC */
#define BCACHE_MAGIC							\
	UUID_INIT(0xc68573f6, 0x4e1a, 0x45ca,				\
		  0x82, 0x65, 0xf5, 0x7f, 0x48, 0xba, 0x6d, 0x81)
/* stored in bch_sb->magic to identify a bcachefs superblock */
#define BCHFS_MAGIC							\
	UUID_INIT(0xc68573f6, 0x66ce, 0x90a9,				\
		  0xd9, 0x6a, 0x60, 0xcf, 0x80, 0x3d, 0xf7, 0xef)

#define BCACHEFS_STATFS_MAGIC		BCACHEFS_SUPER_MAGIC

/* per-structure magics; xored with the fs magic, see __jset_magic()/__bset_magic() */
#define JSET_MAGIC		__cpu_to_le64(0x245235c1a3625032ULL)
#define BSET_MAGIC		__cpu_to_le64(0x90135c78b99e07f5ULL)
1087
1088 static inline __le64 __bch2_sb_magic(struct bch_sb *sb)
1089 {
1090         __le64 ret;
1091
1092         memcpy(&ret, &sb->uuid, sizeof(ret));
1093         return ret;
1094 }
1095
1096 static inline __u64 __jset_magic(struct bch_sb *sb)
1097 {
1098         return __le64_to_cpu(__bch2_sb_magic(sb) ^ JSET_MAGIC);
1099 }
1100
1101 static inline __u64 __bset_magic(struct bch_sb *sb)
1102 {
1103         return __le64_to_cpu(__bch2_sb_magic(sb) ^ BSET_MAGIC);
1104 }
1105
/* Journal */

/* Size of an empty jset_entry header, in u64s */
#define JSET_KEYS_U64s	(sizeof(struct jset_entry) / sizeof(__u64))

/* On-disk journal entry types: */
#define BCH_JSET_ENTRY_TYPES()			\
	x(btree_keys,		0)		\
	x(btree_root,		1)		\
	x(prio_ptrs,		2)		\
	x(blacklist,		3)		\
	x(blacklist_v2,		4)		\
	x(usage,		5)		\
	x(data_usage,		6)		\
	x(clock,		7)		\
	x(dev_usage,		8)		\
	x(log,			9)		\
	x(overwrite,		10)		\
	x(write_buffer_keys,	11)		\
	x(datetime,		12)

enum bch_jset_entry_type {
#define x(f, nr)	BCH_JSET_ENTRY_##f	= nr,
	BCH_JSET_ENTRY_TYPES()
#undef x
	BCH_JSET_ENTRY_NR
};
1131
1132 static inline bool jset_entry_is_key(struct jset_entry *e)
1133 {
1134         switch (e->type) {
1135         case BCH_JSET_ENTRY_btree_keys:
1136         case BCH_JSET_ENTRY_btree_root:
1137         case BCH_JSET_ENTRY_write_buffer_keys:
1138                 return true;
1139         }
1140
1141         return false;
1142 }
1143
/*
 * Journal sequence numbers can be blacklisted: bsets record the max sequence
 * number of all the journal entries they contain updates for, so that on
 * recovery we can ignore those bsets that contain index updates newer than what
 * made it into the journal.
 *
 * This means that we can't reuse that journal_seq - we have to skip it, and
 * then record that we skipped it so that the next time we crash and recover we
 * don't think there was a missing journal entry.
 */
struct jset_entry_blacklist {
	struct jset_entry	entry;
	__le64			seq;	/* the blacklisted sequence number */
};

/* v2: blacklists a closed range of sequence numbers */
struct jset_entry_blacklist_v2 {
	struct jset_entry	entry;
	__le64			start;
	__le64			end;
};
1164
/* Filesystem-wide usage counter types, recorded in jset_entry_usage: */
#define BCH_FS_USAGE_TYPES()			\
	x(reserved,		0)		\
	x(inodes,		1)		\
	x(key_version,		2)

enum bch_fs_usage_type {
#define x(f, nr)	BCH_FS_USAGE_##f	= nr,
	BCH_FS_USAGE_TYPES()
#undef x
	BCH_FS_USAGE_NR
};

/* Journal entry carrying a single fs usage counter value */
struct jset_entry_usage {
	struct jset_entry	entry;
	__le64			v;
} __packed;

/* Journal entry carrying usage for one replicas entry */
struct jset_entry_data_usage {
	struct jset_entry	entry;
	__le64			v;
	struct bch_replicas_entry_v1 r;
} __packed;

/* Journal entry recording an IO clock value (rw selects which clock) */
struct jset_entry_clock {
	struct jset_entry	entry;
	__u8			rw;
	__u8			pad[7];
	__le64			time;
} __packed;
1194
/* One usage bucket/sector/fragmentation triple within jset_entry_dev_usage */
struct jset_entry_dev_usage_type {
	__le64			buckets;
	__le64			sectors;
	__le64			fragmented;
} __packed;

/* Journal entry recording per-device usage, with a variable number of d[] entries */
struct jset_entry_dev_usage {
	struct jset_entry	entry;
	__le32			dev;	/* device index */
	__u32			pad;

	__le64			_buckets_ec;		/* No longer used */
	__le64			_buckets_unavailable;	/* No longer used */

	struct jset_entry_dev_usage_type d[];
};

/* Number of elements in d[], derived from the entry's total size */
static inline unsigned jset_entry_dev_usage_nr_types(struct jset_entry_dev_usage *u)
{
	return (vstruct_bytes(&u->entry) - sizeof(struct jset_entry_dev_usage)) /
		sizeof(struct jset_entry_dev_usage_type);
}
1217
/* Journal entry carrying a free-form log message in d[] */
struct jset_entry_log {
	struct jset_entry	entry;
	u8			d[];
} __packed __aligned(8);

/* Journal entry recording a wallclock timestamp, in seconds */
struct jset_entry_datetime {
	struct jset_entry	entry;
	__le64			seconds;
} __packed __aligned(8);
1227
/*
 * On disk format for a journal entry:
 * seq is monotonically increasing; every journal entry has its own unique
 * sequence number.
 *
 * last_seq is the oldest journal entry that still has keys the btree hasn't
 * flushed to disk yet.
 *
 * version is for on disk format changes.
 */
struct jset {
	struct bch_csum		csum;

	__le64			magic;	/* see __jset_magic() */
	__le64			seq;
	__le32			version;
	__le32			flags;	/* JSET_* bitfields below */

	__le32			u64s; /* size of d[] in u64s */

	/* everything from here on is encrypted when encryption is enabled: */
	__u8			encrypted_start[0];

	__le16			_read_clock; /* no longer used */
	__le16			_write_clock;

	/* Sequence number of oldest dirty journal entry */
	__le64			last_seq;


	struct jset_entry	start[0];
	__u64			_data[];
} __packed __aligned(8);

LE32_BITMASK(JSET_CSUM_TYPE,	struct jset, flags, 0, 4);
LE32_BITMASK(JSET_BIG_ENDIAN,	struct jset, flags, 4, 5);
/* set on journal entries that don't need to be flushed/synced to disk first */
LE32_BITMASK(JSET_NO_FLUSH,	struct jset, flags, 5, 6);

#define BCH_JOURNAL_BUCKETS_MIN		8
1266
/* Btree: */

/* Per-btree behaviour flags, used in BCH_BTREE_IDS() below: */
enum btree_id_flags {
	BTREE_ID_EXTENTS	= BIT(0),	/* keys span ranges, not points */
	BTREE_ID_SNAPSHOTS	= BIT(1),	/* keys are versioned by snapshot */
	BTREE_ID_SNAPSHOT_FIELD	= BIT(2),
	BTREE_ID_DATA		= BIT(3),	/* keys point at user data */
};
1275
/*
 * All btrees: x(name, on-disk id, btree_id_flags, bitmask of key types
 * valid in that btree). IDs and key-type masks are on-disk format.
 */
#define BCH_BTREE_IDS()								\
	x(extents,		0,	BTREE_ID_EXTENTS|BTREE_ID_SNAPSHOTS|BTREE_ID_DATA,\
	  BIT_ULL(KEY_TYPE_whiteout)|						\
	  BIT_ULL(KEY_TYPE_error)|						\
	  BIT_ULL(KEY_TYPE_cookie)|						\
	  BIT_ULL(KEY_TYPE_extent)|						\
	  BIT_ULL(KEY_TYPE_reservation)|					\
	  BIT_ULL(KEY_TYPE_reflink_p)|						\
	  BIT_ULL(KEY_TYPE_inline_data))					\
	x(inodes,		1,	BTREE_ID_SNAPSHOTS,			\
	  BIT_ULL(KEY_TYPE_whiteout)|						\
	  BIT_ULL(KEY_TYPE_inode)|						\
	  BIT_ULL(KEY_TYPE_inode_v2)|						\
	  BIT_ULL(KEY_TYPE_inode_v3)|						\
	  BIT_ULL(KEY_TYPE_inode_generation))					\
	x(dirents,		2,	BTREE_ID_SNAPSHOTS,			\
	  BIT_ULL(KEY_TYPE_whiteout)|						\
	  BIT_ULL(KEY_TYPE_hash_whiteout)|					\
	  BIT_ULL(KEY_TYPE_dirent))						\
	x(xattrs,		3,	BTREE_ID_SNAPSHOTS,			\
	  BIT_ULL(KEY_TYPE_whiteout)|						\
	  BIT_ULL(KEY_TYPE_cookie)|						\
	  BIT_ULL(KEY_TYPE_hash_whiteout)|					\
	  BIT_ULL(KEY_TYPE_xattr))						\
	x(alloc,		4,	0,					\
	  BIT_ULL(KEY_TYPE_alloc)|						\
	  BIT_ULL(KEY_TYPE_alloc_v2)|						\
	  BIT_ULL(KEY_TYPE_alloc_v3)|						\
	  BIT_ULL(KEY_TYPE_alloc_v4))						\
	x(quotas,		5,	0,					\
	  BIT_ULL(KEY_TYPE_quota))						\
	x(stripes,		6,	0,					\
	  BIT_ULL(KEY_TYPE_stripe))						\
	x(reflink,		7,	BTREE_ID_EXTENTS|BTREE_ID_DATA,		\
	  BIT_ULL(KEY_TYPE_reflink_v)|						\
	  BIT_ULL(KEY_TYPE_indirect_inline_data)|				\
	  BIT_ULL(KEY_TYPE_error))						\
	x(subvolumes,		8,	0,					\
	  BIT_ULL(KEY_TYPE_subvolume))						\
	x(snapshots,		9,	0,					\
	  BIT_ULL(KEY_TYPE_snapshot))						\
	x(lru,			10,	0,					\
	  BIT_ULL(KEY_TYPE_set))						\
	x(freespace,		11,	BTREE_ID_EXTENTS,			\
	  BIT_ULL(KEY_TYPE_set))						\
	x(need_discard,		12,	0,					\
	  BIT_ULL(KEY_TYPE_set))						\
	x(backpointers,		13,	0,					\
	  BIT_ULL(KEY_TYPE_backpointer))					\
	x(bucket_gens,		14,	0,					\
	  BIT_ULL(KEY_TYPE_bucket_gens))					\
	x(snapshot_trees,	15,	0,					\
	  BIT_ULL(KEY_TYPE_snapshot_tree))					\
	x(deleted_inodes,	16,	BTREE_ID_SNAPSHOT_FIELD,		\
	  BIT_ULL(KEY_TYPE_set))						\
	x(logged_ops,		17,	0,					\
	  BIT_ULL(KEY_TYPE_logged_op_truncate)|					\
	  BIT_ULL(KEY_TYPE_logged_op_finsert))					\
	x(rebalance_work,	18,	BTREE_ID_SNAPSHOT_FIELD,		\
	  BIT_ULL(KEY_TYPE_set)|BIT_ULL(KEY_TYPE_cookie))			\
	x(subvolume_children,	19,	0,					\
	  BIT_ULL(KEY_TYPE_set))						\
	x(accounting,		20,	BTREE_ID_SNAPSHOT_FIELD,		\
	  BIT_ULL(KEY_TYPE_accounting))						\

enum btree_id {
#define x(name, nr, ...) BTREE_ID_##name = nr,
	BCH_BTREE_IDS()
#undef x
	BTREE_ID_NR
};

/*
 * Maximum number of btrees that we will _ever_ have under the current scheme,
 * where we refer to them with 64 bit bitfields - and we also need a bit for
 * the interior btree node type:
 */
#define BTREE_ID_NR_MAX		63
1354
1355 static inline bool btree_id_is_alloc(enum btree_id id)
1356 {
1357         switch (id) {
1358         case BTREE_ID_alloc:
1359         case BTREE_ID_backpointers:
1360         case BTREE_ID_need_discard:
1361         case BTREE_ID_freespace:
1362         case BTREE_ID_bucket_gens:
1363                 return true;
1364         default:
1365                 return false;
1366         }
1367 }
1368
#define BTREE_MAX_DEPTH		4U

/* Btree nodes */

/*
 * Btree nodes
 *
 * On disk a btree node is a list/log of these; within each set the keys are
 * sorted
 */
struct bset {
	__le64			seq;	/* matches btree_node seq (see BTREE_NODE_SEQ) */

	/*
	 * Highest journal entry this bset contains keys for.
	 * If on recovery we don't see that journal entry, this bset is ignored:
	 * this allows us to preserve the order of all index updates after a
	 * crash, since the journal records a total order of all index updates
	 * and anything that didn't make it to the journal doesn't get used.
	 */
	__le64			journal_seq;

	__le32			flags;	/* BSET_* bitfields below */
	__le16			version;
	__le16			u64s; /* count of d[] in u64s */

	struct bkey_packed	start[0];
	__u64			_data[];
} __packed __aligned(8);

LE32_BITMASK(BSET_CSUM_TYPE,	struct bset, flags, 0, 4);

LE32_BITMASK(BSET_BIG_ENDIAN,	struct bset, flags, 4, 5);
LE32_BITMASK(BSET_SEPARATE_WHITEOUTS,
				struct bset, flags, 5, 6);

/* Sector offset within the btree node: */
LE32_BITMASK(BSET_OFFSET,	struct bset, flags, 16, 32);
1407
/* On-disk header + first bset of a btree node (at offset 0 within the node) */
struct btree_node {
	struct bch_csum		csum;
	__le64			magic;	/* see __bset_magic() usage — NOTE(review): confirm */

	/* this flags field is encrypted, unlike bset->flags: */
	__le64			flags;	/* BTREE_NODE_* bitfields below */

	/* Closed interval: */
	struct bpos		min_key;
	struct bpos		max_key;
	struct bch_extent_ptr	_ptr; /* not used anymore */
	struct bkey_format	format;	/* packed key format for this node */

	/* the first bset; the anonymous struct aliases its header fields */
	union {
	struct bset		keys;
	struct {
		__u8		pad[22];
		__le16		u64s;
		__u64		_data[0];

	};
	};
} __packed __aligned(8);

/* btree ID is split into LO (bits 0-4) and HI (bits 9-25); see BTREE_NODE_ID() */
LE64_BITMASK(BTREE_NODE_ID_LO,	struct btree_node, flags,  0,  4);
LE64_BITMASK(BTREE_NODE_LEVEL,	struct btree_node, flags,  4,  8);
LE64_BITMASK(BTREE_NODE_NEW_EXTENT_OVERWRITE,
				struct btree_node, flags,  8,  9);
LE64_BITMASK(BTREE_NODE_ID_HI,	struct btree_node, flags,  9, 25);
/* 25-32 unused */
LE64_BITMASK(BTREE_NODE_SEQ,	struct btree_node, flags, 32, 64);
1439
1440 static inline __u64 BTREE_NODE_ID(struct btree_node *n)
1441 {
1442         return BTREE_NODE_ID_LO(n) | (BTREE_NODE_ID_HI(n) << 4);
1443 }
1444
1445 static inline void SET_BTREE_NODE_ID(struct btree_node *n, __u64 v)
1446 {
1447         SET_BTREE_NODE_ID_LO(n, v);
1448         SET_BTREE_NODE_ID_HI(n, v >> 4);
1449 }
1450
/*
 * Subsequent bsets within a btree node (everything after the first, which is
 * written as struct btree_node): checksum + one bset.
 */
struct btree_node_entry {
	struct bch_csum		csum;

	/* the anonymous struct aliases the bset's header fields */
	union {
	struct bset		keys;
	struct {
		__u8		pad[22];
		__le16		u64s;
		__u64		_data[0];
	};
	};
} __packed __aligned(8);
1463
1464 #endif /* _BCACHEFS_FORMAT_H */