bcachefs: Refactor memcpy into direct assignment
[linux-block.git] / fs / bcachefs / bcachefs_format.h
1/* SPDX-License-Identifier: GPL-2.0 */
2#ifndef _BCACHEFS_FORMAT_H
3#define _BCACHEFS_FORMAT_H
4
5/*
6 * bcachefs on disk data structures
7 *
8 * OVERVIEW:
9 *
10 * There are three main types of on disk data structures in bcachefs (this is
11 * reduced from 5 in bcache)
12 *
13 * - superblock
14 * - journal
15 * - btree
16 *
17 * The btree is the primary structure; most metadata exists as keys in the
18 * various btrees. There are only a small number of btrees, they're not
19 * sharded - we have one btree for extents, another for inodes, et cetera.
20 *
21 * SUPERBLOCK:
22 *
23 * The superblock contains the location of the journal, the list of devices in
24 * the filesystem, and in general any metadata we need in order to decide
25 * whether we can start a filesystem, or that we need prior to reading the
26 * journal/btree roots.
27 *
28 * The superblock is extensible, and most of the contents of the superblock are
29 * in variable length, type tagged fields; see struct bch_sb_field.
30 *
31 * Backup superblocks do not reside in a fixed location; also, superblocks do
32 * not have a fixed size. To locate backup superblocks we have struct
33 * bch_sb_layout; we store a copy of this inside every superblock, and also
34 * before the first superblock.
35 *
36 * JOURNAL:
37 *
38 * The journal primarily records btree updates in the order they occurred;
39 * journal replay consists of just iterating over all the keys in the open
40 * journal entries and re-inserting them into the btrees.
41 *
42 * The journal also contains entry types for the btree roots, and blacklisted
43 * journal sequence numbers (see journal_seq_blacklist.c).
44 *
45 * BTREE:
46 *
47 * bcachefs btrees are copy on write b+ trees, where nodes are big (typically
48 * 128k-256k) and log structured. We use struct btree_node for writing the first
49 * entry in a given node (offset 0), and struct btree_node_entry for all
50 * subsequent writes.
51 *
52 * After the header, btree node entries contain a list of keys in sorted order.
53 * Values are stored inline with the keys; since values are variable length (and
54 * keys effectively are variable length too, due to packing) we can't do random
55 * access without building up additional in memory tables in the btree node read
56 * path.
57 *
58 * BTREE KEYS (struct bkey):
59 *
60 * The various btrees share a common format for the key - so as to avoid
61 * switching in fastpath lookup/comparison code - but define their own
62 * structures for the key values.
63 *
64 * The size of a key/value pair is stored as a u8 in units of u64s, so the max
65 * size is just under 2k. The common part also contains a type tag for the
66 * value, and a format field indicating whether the key is packed or not (and
67 * also meant to allow adding new key fields in the future, if desired).
68 *
69 * bkeys, when stored within a btree node, may also be packed. In that case, the
70 * bkey_format in that node is used to unpack it. Packed bkeys mean that we can
71 * be generous with field sizes in the common part of the key format (64 bit
72 * inode number, 64 bit offset, 96 bit version field, etc.) for negligible cost.
73 */
74
75#include <asm/types.h>
76#include <asm/byteorder.h>
77#include <linux/kernel.h>
78#include <linux/uuid.h>
79#include "vstructs.h"
80
81#ifdef __KERNEL__
82typedef uuid_t __uuid_t;
83#endif
84
85#define BITMASK(name, type, field, offset, end) \
86static const __maybe_unused unsigned name##_OFFSET = offset; \
87static const __maybe_unused unsigned name##_BITS = (end - offset); \
88 \
89static inline __u64 name(const type *k) \
90{ \
91 return (k->field >> offset) & ~(~0ULL << (end - offset)); \
92} \
93 \
94static inline void SET_##name(type *k, __u64 v) \
95{ \
96 k->field &= ~(~(~0ULL << (end - offset)) << offset); \
97 k->field |= (v & ~(~0ULL << (end - offset))) << offset; \
98}
99
100#define LE_BITMASK(_bits, name, type, field, offset, end) \
101static const __maybe_unused unsigned name##_OFFSET = offset; \
102static const __maybe_unused unsigned name##_BITS = (end - offset); \
103static const __maybe_unused __u##_bits name##_MAX = (1ULL << (end - offset)) - 1;\
104 \
105static inline __u64 name(const type *k) \
106{ \
107 return (__le##_bits##_to_cpu(k->field) >> offset) & \
108 ~(~0ULL << (end - offset)); \
109} \
110 \
111static inline void SET_##name(type *k, __u64 v) \
112{ \
113 __u##_bits new = __le##_bits##_to_cpu(k->field); \
114 \
115 new &= ~(~(~0ULL << (end - offset)) << offset); \
116 new |= (v & ~(~0ULL << (end - offset))) << offset; \
117 k->field = __cpu_to_le##_bits(new); \
118}
119
120#define LE16_BITMASK(n, t, f, o, e) LE_BITMASK(16, n, t, f, o, e)
121#define LE32_BITMASK(n, t, f, o, e) LE_BITMASK(32, n, t, f, o, e)
122#define LE64_BITMASK(n, t, f, o, e) LE_BITMASK(64, n, t, f, o, e)
123
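/*
 * Illustrative expansion (editor's sketch, not an additional definition):
 * LE32_BITMASK(INODE_STR_HASH, struct bch_inode, bi_flags, 20, 24), declared
 * further down in this file, generates roughly the following accessors:
 */
#if 0
static const __maybe_unused unsigned INODE_STR_HASH_OFFSET = 20;
static const __maybe_unused unsigned INODE_STR_HASH_BITS = 4;
static const __maybe_unused __u32 INODE_STR_HASH_MAX = (1ULL << 4) - 1;

static inline __u64 INODE_STR_HASH(const struct bch_inode *k)
{
	return (__le32_to_cpu(k->bi_flags) >> 20) & ~(~0ULL << 4);
}

static inline void SET_INODE_STR_HASH(struct bch_inode *k, __u64 v)
{
	__u32 new = __le32_to_cpu(k->bi_flags);

	new &= ~(~(~0ULL << 4) << 20);
	new |= (v & ~(~0ULL << 4)) << 20;
	k->bi_flags = __cpu_to_le32(new);
}
#endif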
124struct bkey_format {
125 __u8 key_u64s;
126 __u8 nr_fields;
127 /* One unused slot for now: */
128 __u8 bits_per_field[6];
129 __le64 field_offset[6];
130};
131
132/* Btree keys - all units are in sectors */
133
134struct bpos {
135 /*
136 * Word order matches machine byte order - btree code treats a bpos as a
137 * single large integer, for search/comparison purposes
138 *
139 * Note that wherever a bpos is embedded in another on disk data
140 * structure, it has to be byte swabbed when reading in metadata that
141 * wasn't written in native endian order:
142 */
143#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
144 __u32 snapshot;
145 __u64 offset;
146 __u64 inode;
147#elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
148 __u64 inode;
149 __u64 offset; /* Points to end of extent - sectors */
150 __u32 snapshot;
151#else
152#error edit for your odd byteorder.
153#endif
154} __packed __aligned(4);
155
156#define KEY_INODE_MAX ((__u64)~0ULL)
157#define KEY_OFFSET_MAX ((__u64)~0ULL)
158#define KEY_SNAPSHOT_MAX ((__u32)~0U)
159#define KEY_SIZE_MAX ((__u32)~0U)
160
161static inline struct bpos SPOS(__u64 inode, __u64 offset, __u32 snapshot)
162{
163 return (struct bpos) {
164 .inode = inode,
165 .offset = offset,
166 .snapshot = snapshot,
167 };
168}
169
170#define POS_MIN SPOS(0, 0, 0)
171#define POS_MAX SPOS(KEY_INODE_MAX, KEY_OFFSET_MAX, 0)
172#define SPOS_MAX SPOS(KEY_INODE_MAX, KEY_OFFSET_MAX, KEY_SNAPSHOT_MAX)
173#define POS(_inode, _offset) SPOS(_inode, _offset, 0)
174
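/*
 * Editor's sketch, for illustration only: keys sort by inode, then offset,
 * then snapshot, which is exactly the order a single large-integer compare of
 * the machine-word layout above produces. The in-tree comparison helpers live
 * in bkey.h; this function is not part of the on-disk format.
 */
static inline int bpos_cmp_sketch(struct bpos l, struct bpos r)
{
	if (l.inode != r.inode)
		return l.inode < r.inode ? -1 : 1;
	if (l.offset != r.offset)
		return l.offset < r.offset ? -1 : 1;
	if (l.snapshot != r.snapshot)
		return l.snapshot < r.snapshot ? -1 : 1;
	return 0;
}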
175/* Empty placeholder struct, for container_of() */
176struct bch_val {
177 __u64 __nothing[0];
178};
179
180struct bversion {
181#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
182 __u64 lo;
183 __u32 hi;
184#elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
185 __u32 hi;
186 __u64 lo;
187#endif
188} __packed __aligned(4);
189
190struct bkey {
191 /* Size of combined key and value, in u64s */
192 __u8 u64s;
193
194 /* Format of key (0 for format local to btree node) */
195#if defined(__LITTLE_ENDIAN_BITFIELD)
196 __u8 format:7,
197 needs_whiteout:1;
198#elif defined (__BIG_ENDIAN_BITFIELD)
199 __u8 needs_whiteout:1,
200 format:7;
201#else
202#error edit for your odd byteorder.
203#endif
204
205 /* Type of the value */
206 __u8 type;
207
208#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
209 __u8 pad[1];
210
211 struct bversion version;
212 __u32 size; /* extent size, in sectors */
213 struct bpos p;
214#elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
215 struct bpos p;
216 __u32 size; /* extent size, in sectors */
217 struct bversion version;
218
219 __u8 pad[1];
220#endif
221} __packed __aligned(8);
222
223struct bkey_packed {
224 __u64 _data[0];
225
226 /* Size of combined key and value, in u64s */
227 __u8 u64s;
228
229 /* Format of key (0 for format local to btree node) */
230
231 /*
232 * XXX: next incompat on disk format change, switch format and
233 * needs_whiteout - bkey_packed() will be cheaper if format is the high
234 * bits of the bitfield
235 */
236#if defined(__LITTLE_ENDIAN_BITFIELD)
237 __u8 format:7,
238 needs_whiteout:1;
239#elif defined (__BIG_ENDIAN_BITFIELD)
240 __u8 needs_whiteout:1,
241 format:7;
242#endif
243
244 /* Type of the value */
245 __u8 type;
246 __u8 key_start[0];
247
248 /*
249 * We copy bkeys with struct assignment in various places, and while
250 * that shouldn't be done with packed bkeys we can't disallow it in C,
251 * and it's legal to cast a bkey to a bkey_packed - so padding it out
252 * to the same size as struct bkey should hopefully be safest.
253 */
254 __u8 pad[sizeof(struct bkey) - 3];
255} __packed __aligned(8);
256
257typedef struct {
258 __le64 lo;
259 __le64 hi;
260} bch_le128;
261
262#define BKEY_U64s (sizeof(struct bkey) / sizeof(__u64))
263#define BKEY_U64s_MAX U8_MAX
264#define BKEY_VAL_U64s_MAX (BKEY_U64s_MAX - BKEY_U64s)
265
266#define KEY_PACKED_BITS_START 24
267
268#define KEY_FORMAT_LOCAL_BTREE 0
269#define KEY_FORMAT_CURRENT 1
270
271enum bch_bkey_fields {
272 BKEY_FIELD_INODE,
273 BKEY_FIELD_OFFSET,
274 BKEY_FIELD_SNAPSHOT,
275 BKEY_FIELD_SIZE,
276 BKEY_FIELD_VERSION_HI,
277 BKEY_FIELD_VERSION_LO,
278 BKEY_NR_FIELDS,
279};
280
281#define bkey_format_field(name, field) \
282 [BKEY_FIELD_##name] = (sizeof(((struct bkey *) NULL)->field) * 8)
283
284#define BKEY_FORMAT_CURRENT \
285((struct bkey_format) { \
286 .key_u64s = BKEY_U64s, \
287 .nr_fields = BKEY_NR_FIELDS, \
288 .bits_per_field = { \
289 bkey_format_field(INODE, p.inode), \
290 bkey_format_field(OFFSET, p.offset), \
291 bkey_format_field(SNAPSHOT, p.snapshot), \
292 bkey_format_field(SIZE, size), \
293 bkey_format_field(VERSION_HI, version.hi), \
294 bkey_format_field(VERSION_LO, version.lo), \
295 }, \
296})
297
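/*
 * Editor's sketch of what the format describes (the real pack/unpack code is
 * in bkey.c and handles the exact bit layout): each field of a packed key is
 * stored in bits_per_field[i] bits as a delta from field_offset[i], so
 * conceptually:
 */
static inline __u64 bkey_field_unpack_sketch(const struct bkey_format *f,
					     enum bch_bkey_fields i,
					     __u64 packed_bits)
{
	/* a field with bits_per_field[i] == 0 is constant for the whole node */
	return packed_bits + __le64_to_cpu(f->field_offset[i]);
}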
298/* bkey with inline value */
299struct bkey_i {
300 __u64 _data[0];
301
302 struct bkey k;
303 struct bch_val v;
304};
305
306#define KEY(_inode, _offset, _size) \
307((struct bkey) { \
308 .u64s = BKEY_U64s, \
309 .format = KEY_FORMAT_CURRENT, \
310 .p = POS(_inode, _offset), \
311 .size = _size, \
312})
313
314static inline void bkey_init(struct bkey *k)
315{
316 *k = KEY(0, 0, 0);
317}
318
319#define bkey_bytes(_k) ((_k)->u64s * sizeof(__u64))
320
321#define __BKEY_PADDED(key, pad) \
322 struct bkey_i key; __u64 key ## _pad[pad]
323
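/*
 * Editor's sketch of typical usage (hypothetical local variable, not part of
 * this header): __BKEY_PADDED declares an on-stack bkey_i with room for a
 * value of up to 'pad' u64s following it.
 */
#if 0
struct { __BKEY_PADDED(k, 8); } tmp;

bkey_init(&tmp.k.k);			/* tmp.k.k.u64s == BKEY_U64s, no value yet */
tmp.k.k.type	= KEY_TYPE_cookie;
tmp.k.k.u64s	= BKEY_U64s + sizeof(struct bch_cookie) / sizeof(__u64);
#endif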
324/*
325 * - DELETED keys are used internally to mark keys that should be ignored but
326 * override keys in composition order. Their version number is ignored.
327 *
328 * - DISCARDED keys indicate that the data is all 0s because it has been
329 * discarded. DISCARDs may have a version; if the version is nonzero the key
330 * will be persistent, otherwise the key will be dropped whenever the btree
331 * node is rewritten (like DELETED keys).
332 *
333 * - ERROR: any read of the data returns a read error, as the data was lost due
334 * to a failing device. Like DISCARDED keys, they can be removed (overridden)
335 * by new writes or cluster-wide GC. Node repair can also overwrite them with
336 * the same or a more recent version number, but not with an older version
337 * number.
338 *
339 * - WHITEOUT: for hash table btrees
340 */
341#define BCH_BKEY_TYPES() \
342 x(deleted, 0) \
343 x(whiteout, 1) \
344 x(error, 2) \
345 x(cookie, 3) \
346 x(hash_whiteout, 4) \
347 x(btree_ptr, 5) \
348 x(extent, 6) \
349 x(reservation, 7) \
350 x(inode, 8) \
351 x(inode_generation, 9) \
352 x(dirent, 10) \
353 x(xattr, 11) \
354 x(alloc, 12) \
355 x(quota, 13) \
356 x(stripe, 14) \
357 x(reflink_p, 15) \
358 x(reflink_v, 16) \
359 x(inline_data, 17) \
360 x(btree_ptr_v2, 18) \
361 x(indirect_inline_data, 19) \
362 x(alloc_v2, 20) \
363 x(subvolume, 21) \
364 x(snapshot, 22) \
365 x(inode_v2, 23) \
366 x(alloc_v3, 24) \
367 x(set, 25) \
368 x(lru, 26) \
369 x(alloc_v4, 27) \
370 x(backpointer, 28) \
371 x(inode_v3, 29) \
372 x(bucket_gens, 30) \
373 x(snapshot_tree, 31) \
374 x(logged_op_truncate, 32) \
375 x(logged_op_finsert, 33)
376
377enum bch_bkey_type {
378#define x(name, nr) KEY_TYPE_##name = nr,
379 BCH_BKEY_TYPES()
380#undef x
381 KEY_TYPE_MAX,
382};
383
384struct bch_deleted {
385 struct bch_val v;
386};
387
388struct bch_whiteout {
389 struct bch_val v;
390};
391
392struct bch_error {
393 struct bch_val v;
394};
395
396struct bch_cookie {
397 struct bch_val v;
398 __le64 cookie;
399};
400
401struct bch_hash_whiteout {
402 struct bch_val v;
403};
404
405struct bch_set {
406 struct bch_val v;
407};
408
409/* Extents */
410
411/*
412 * In extent bkeys, the value is a list of pointers (bch_extent_ptr), optionally
413 * preceded by checksum/compression information (bch_extent_crc32 or
414 * bch_extent_crc64).
415 *
416 * One major determining factor in the format of extents is how we handle and
417 * represent extents that have been partially overwritten and thus trimmed:
418 *
419 * If an extent is not checksummed or compressed, when the extent is trimmed we
420 * don't have to remember the extent we originally allocated and wrote: we can
421 * merely adjust ptr->offset to point to the start of the data that is currently
422 * live. The size field in struct bkey records the current (live) size of the
423 * extent, and is also used to mean "size of region on disk that we point to" in
424 * this case.
425 *
426 * Thus an extent that is not checksummed or compressed will consist only of a
427 * list of bch_extent_ptrs, with none of the fields in
428 * bch_extent_crc32/bch_extent_crc64.
429 *
430 * When an extent is checksummed or compressed, it's not possible to read only
431 * the data that is currently live: we have to read the entire extent that was
432 * originally written, and then return only the part of the extent that is
433 * currently live.
434 *
435 * Thus, in addition to the current size of the extent in struct bkey, we need
436 * to store the size of the originally allocated space - this is the
437 * compressed_size and uncompressed_size fields in bch_extent_crc32/64. Also,
438 * when the extent is trimmed, instead of modifying the offset field of the
439 * pointer, we keep a second smaller offset field - "offset into the original
440 * extent of the currently live region".
441 *
442 * The other major determining factor is replication and data migration:
443 *
444 * Each pointer may have its own bch_extent_crc32/64. When doing a replicated
445 * write, we will initially write all the replicas in the same format, with the
446 * same checksum type and compression format - however, when copygc runs later (or
447 * tiering/cache promotion, anything that moves data), it is not in general
448 * going to rewrite all the pointers at once - one of the replicas may be in a
449 * bucket on one device that has very little fragmentation while another lives
450 * in a bucket that has become heavily fragmented, and thus is being rewritten
451 * sooner than the rest.
452 *
453 * Thus it will only move a subset of the pointers (or in the case of
454 * tiering/cache promotion perhaps add a single pointer without dropping any
455 * current pointers), and if the extent has been partially overwritten it must
456 * write only the currently live portion (or copygc would not be able to reduce
457 * fragmentation!) - which necessitates a different bch_extent_crc format for
458 * the new pointer.
459 *
460 * But in the interests of space efficiency, we don't want to store one
461 * bch_extent_crc for each pointer if we don't have to.
462 *
463 * Thus, a bch_extent consists of bch_extent_crc32s, bch_extent_crc64s, and
464 * bch_extent_ptrs appended arbitrarily one after the other. We determine the
465 * type of a given entry with a scheme similar to utf8 (except we're encoding a
466 * type, not a size), encoding the type in the position of the first set bit:
467 *
468 * bch_extent_crc32 - 0b1
469 * bch_extent_ptr - 0b10
470 * bch_extent_crc64 - 0b100
471 *
472 * We do it this way because bch_extent_crc32 is _very_ constrained on bits (and
473 * bch_extent_crc64 is the least constrained).
474 *
475 * Then, each bch_extent_crc32/64 applies to the pointers that follow after it,
476 * until the next bch_extent_crc32/64.
477 *
478 * If there are no bch_extent_crcs preceding a bch_extent_ptr, then that pointer
479 * is neither checksummed nor compressed.
480 */
481
482/* 128 bits, sufficient for cryptographic MACs: */
483struct bch_csum {
484 __le64 lo;
485 __le64 hi;
486} __packed __aligned(8);
487
488#define BCH_EXTENT_ENTRY_TYPES() \
489 x(ptr, 0) \
490 x(crc32, 1) \
491 x(crc64, 2) \
492 x(crc128, 3) \
493 x(stripe_ptr, 4) \
494 x(rebalance, 5)
495#define BCH_EXTENT_ENTRY_MAX 6
496
497enum bch_extent_entry_type {
498#define x(f, n) BCH_EXTENT_ENTRY_##f = n,
499 BCH_EXTENT_ENTRY_TYPES()
500#undef x
501};
502
503/* Compressed/uncompressed size are stored biased by 1: */
504struct bch_extent_crc32 {
505#if defined(__LITTLE_ENDIAN_BITFIELD)
506 __u32 type:2,
507 _compressed_size:7,
508 _uncompressed_size:7,
509 offset:7,
510 _unused:1,
511 csum_type:4,
512 compression_type:4;
513 __u32 csum;
514#elif defined (__BIG_ENDIAN_BITFIELD)
515 __u32 csum;
516 __u32 compression_type:4,
517 csum_type:4,
518 _unused:1,
519 offset:7,
520 _uncompressed_size:7,
521 _compressed_size:7,
522 type:2;
523#endif
524} __packed __aligned(8);
525
526#define CRC32_SIZE_MAX (1U << 7)
527#define CRC32_NONCE_MAX 0
528
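/*
 * Editor's sketch of the "biased by 1" encoding above (hypothetical helper;
 * the extent code does its own unpacking): a stored _compressed_size of 0
 * means 1 sector, so the 7 bit field covers 1..CRC32_SIZE_MAX sectors.
 */
static inline unsigned crc32_compressed_size_sketch(const struct bch_extent_crc32 *crc)
{
	return crc->_compressed_size + 1;
}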
529struct bch_extent_crc64 {
530#if defined(__LITTLE_ENDIAN_BITFIELD)
531 __u64 type:3,
532 _compressed_size:9,
533 _uncompressed_size:9,
534 offset:9,
535 nonce:10,
536 csum_type:4,
537 compression_type:4,
538 csum_hi:16;
539#elif defined (__BIG_ENDIAN_BITFIELD)
540 __u64 csum_hi:16,
541 compression_type:4,
542 csum_type:4,
543 nonce:10,
544 offset:9,
545 _uncompressed_size:9,
546 _compressed_size:9,
547 type:3;
548#endif
549 __u64 csum_lo;
550} __packed __aligned(8);
551
552#define CRC64_SIZE_MAX (1U << 9)
553#define CRC64_NONCE_MAX ((1U << 10) - 1)
554
555struct bch_extent_crc128 {
556#if defined(__LITTLE_ENDIAN_BITFIELD)
557 __u64 type:4,
558 _compressed_size:13,
559 _uncompressed_size:13,
560 offset:13,
561 nonce:13,
562 csum_type:4,
563 compression_type:4;
564#elif defined (__BIG_ENDIAN_BITFIELD)
565 __u64 compression_type:4,
566 csum_type:4,
567 nonce:13,
568 offset:13,
569 _uncompressed_size:13,
570 _compressed_size:13,
571 type:4;
572#endif
573 struct bch_csum csum;
574} __packed __aligned(8);
575
576#define CRC128_SIZE_MAX (1U << 13)
577#define CRC128_NONCE_MAX ((1U << 13) - 1)
578
579/*
580 * @reservation - pointer hasn't been written to, just reserved
581 */
582struct bch_extent_ptr {
583#if defined(__LITTLE_ENDIAN_BITFIELD)
584 __u64 type:1,
585 cached:1,
586 unused:1,
587 unwritten:1,
588 offset:44, /* 8 petabytes */
589 dev:8,
590 gen:8;
591#elif defined (__BIG_ENDIAN_BITFIELD)
592 __u64 gen:8,
593 dev:8,
594 offset:44,
595 unwritten:1,
596 unused:1,
597 cached:1,
598 type:1;
599#endif
600} __packed __aligned(8);
601
602struct bch_extent_stripe_ptr {
603#if defined(__LITTLE_ENDIAN_BITFIELD)
604 __u64 type:5,
605 block:8,
606 redundancy:4,
607 idx:47;
608#elif defined (__BIG_ENDIAN_BITFIELD)
609 __u64 idx:47,
610 redundancy:4,
611 block:8,
612 type:5;
613#endif
614};
615
616struct bch_extent_reservation {
617#if defined(__LITTLE_ENDIAN_BITFIELD)
618 __u64 type:6,
619 unused:22,
620 replicas:4,
621 generation:32;
622#elif defined (__BIG_ENDIAN_BITFIELD)
623 __u64 generation:32,
624 replicas:4,
625 unused:22,
626 type:6;
627#endif
628};
629
630struct bch_extent_rebalance {
631#if defined(__LITTLE_ENDIAN_BITFIELD)
632 __u64 type:7,
633 unused:33,
634 compression:8,
635 target:16;
636#elif defined (__BIG_ENDIAN_BITFIELD)
637 __u64 target:16,
638 compression:8,
639 unused:33,
640 type:7;
641#endif
642};
643
644union bch_extent_entry {
645#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__ || __BITS_PER_LONG == 64
646 unsigned long type;
647#elif __BITS_PER_LONG == 32
648 struct {
649 unsigned long pad;
650 unsigned long type;
651 };
652#else
653#error edit for your odd byteorder.
654#endif
655
656#define x(f, n) struct bch_extent_##f f;
657 BCH_EXTENT_ENTRY_TYPES()
658#undef x
659};
660
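/*
 * Editor's sketch of the first-set-bit encoding described above: the entry
 * type is recovered as the index of the lowest set bit of the type word
 * (assuming __ffs() from <linux/bitops.h>; the extent code has its own helper
 * for this).
 */
static inline enum bch_extent_entry_type
extent_entry_type_sketch(const union bch_extent_entry *e)
{
	return __ffs(e->type);
}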
661struct bch_btree_ptr {
662 struct bch_val v;
663
664 __u64 _data[0];
665 struct bch_extent_ptr start[];
666} __packed __aligned(8);
667
668struct bch_btree_ptr_v2 {
669 struct bch_val v;
670
671 __u64 mem_ptr;
672 __le64 seq;
673 __le16 sectors_written;
674 __le16 flags;
675 struct bpos min_key;
676 __u64 _data[0];
677 struct bch_extent_ptr start[];
678} __packed __aligned(8);
679
680LE16_BITMASK(BTREE_PTR_RANGE_UPDATED, struct bch_btree_ptr_v2, flags, 0, 1);
681
682struct bch_extent {
683 struct bch_val v;
684
685 __u64 _data[0];
686 union bch_extent_entry start[];
687} __packed __aligned(8);
688
689struct bch_reservation {
690 struct bch_val v;
691
692 __le32 generation;
693 __u8 nr_replicas;
694 __u8 pad[3];
695} __packed __aligned(8);
696
697/* Maximum size (in u64s) a single pointer could be: */
698#define BKEY_EXTENT_PTR_U64s_MAX\
699 ((sizeof(struct bch_extent_crc128) + \
700 sizeof(struct bch_extent_ptr)) / sizeof(__u64))
701
702/* Maximum possible size of an entire extent value: */
703#define BKEY_EXTENT_VAL_U64s_MAX \
704 (1 + BKEY_EXTENT_PTR_U64s_MAX * (BCH_REPLICAS_MAX + 1))
705
706/* Maximum possible size of an entire extent, key + value: */
707#define BKEY_EXTENT_U64s_MAX (BKEY_U64s + BKEY_EXTENT_VAL_U64s_MAX)
708
709/* Btree pointers don't carry around checksums: */
710#define BKEY_BTREE_PTR_VAL_U64s_MAX \
711 ((sizeof(struct bch_btree_ptr_v2) + \
712 sizeof(struct bch_extent_ptr) * BCH_REPLICAS_MAX) / sizeof(__u64))
713#define BKEY_BTREE_PTR_U64s_MAX \
714 (BKEY_U64s + BKEY_BTREE_PTR_VAL_U64s_MAX)
715
716/* Inodes */
717
718#define BLOCKDEV_INODE_MAX 4096
719
720#define BCACHEFS_ROOT_INO 4096
721
722struct bch_inode {
723 struct bch_val v;
724
725 __le64 bi_hash_seed;
726 __le32 bi_flags;
727 __le16 bi_mode;
728 __u8 fields[];
729} __packed __aligned(8);
730
731struct bch_inode_v2 {
732 struct bch_val v;
733
734 __le64 bi_journal_seq;
735 __le64 bi_hash_seed;
736 __le64 bi_flags;
737 __le16 bi_mode;
738 __u8 fields[];
739} __packed __aligned(8);
740
741struct bch_inode_v3 {
742 struct bch_val v;
743
744 __le64 bi_journal_seq;
745 __le64 bi_hash_seed;
746 __le64 bi_flags;
747 __le64 bi_sectors;
748 __le64 bi_size;
749 __le64 bi_version;
750 __u8 fields[];
751} __packed __aligned(8);
752
753#define INODEv3_FIELDS_START_INITIAL 6
754#define INODEv3_FIELDS_START_CUR (offsetof(struct bch_inode_v3, fields) / sizeof(__u64))
755
756struct bch_inode_generation {
757 struct bch_val v;
758
759 __le32 bi_generation;
760 __le32 pad;
761} __packed __aligned(8);
762
763/*
764 * bi_subvol and bi_parent_subvol are only set for subvolume roots:
765 */
766
767#define BCH_INODE_FIELDS_v2() \
768 x(bi_atime, 96) \
769 x(bi_ctime, 96) \
770 x(bi_mtime, 96) \
771 x(bi_otime, 96) \
772 x(bi_size, 64) \
773 x(bi_sectors, 64) \
774 x(bi_uid, 32) \
775 x(bi_gid, 32) \
776 x(bi_nlink, 32) \
777 x(bi_generation, 32) \
778 x(bi_dev, 32) \
779 x(bi_data_checksum, 8) \
780 x(bi_compression, 8) \
781 x(bi_project, 32) \
782 x(bi_background_compression, 8) \
783 x(bi_data_replicas, 8) \
784 x(bi_promote_target, 16) \
785 x(bi_foreground_target, 16) \
786 x(bi_background_target, 16) \
787 x(bi_erasure_code, 16) \
788 x(bi_fields_set, 16) \
789 x(bi_dir, 64) \
790 x(bi_dir_offset, 64) \
791 x(bi_subvol, 32) \
792 x(bi_parent_subvol, 32)
793
794#define BCH_INODE_FIELDS_v3() \
795 x(bi_atime, 96) \
796 x(bi_ctime, 96) \
797 x(bi_mtime, 96) \
798 x(bi_otime, 96) \
799 x(bi_uid, 32) \
800 x(bi_gid, 32) \
801 x(bi_nlink, 32) \
802 x(bi_generation, 32) \
803 x(bi_dev, 32) \
804 x(bi_data_checksum, 8) \
805 x(bi_compression, 8) \
806 x(bi_project, 32) \
807 x(bi_background_compression, 8) \
808 x(bi_data_replicas, 8) \
809 x(bi_promote_target, 16) \
810 x(bi_foreground_target, 16) \
811 x(bi_background_target, 16) \
812 x(bi_erasure_code, 16) \
813 x(bi_fields_set, 16) \
814 x(bi_dir, 64) \
815 x(bi_dir_offset, 64) \
816 x(bi_subvol, 32) \
817 x(bi_parent_subvol, 32) \
818 x(bi_nocow, 8)
819
820/* subset of BCH_INODE_FIELDS */
821#define BCH_INODE_OPTS() \
822 x(data_checksum, 8) \
823 x(compression, 8) \
824 x(project, 32) \
825 x(background_compression, 8) \
826 x(data_replicas, 8) \
827 x(promote_target, 16) \
828 x(foreground_target, 16) \
829 x(background_target, 16) \
830 x(erasure_code, 16) \
831 x(nocow, 8)
832
833enum inode_opt_id {
834#define x(name, ...) \
835 Inode_opt_##name,
836 BCH_INODE_OPTS()
837#undef x
838 Inode_opt_nr,
839};
840
841enum {
842 /*
843 * User flags (get/settable with FS_IOC_*FLAGS, correspond to FS_*_FL
844 * flags)
845 */
846 __BCH_INODE_SYNC = 0,
847 __BCH_INODE_IMMUTABLE = 1,
848 __BCH_INODE_APPEND = 2,
849 __BCH_INODE_NODUMP = 3,
850 __BCH_INODE_NOATIME = 4,
851
852 __BCH_INODE_I_SIZE_DIRTY = 5, /* obsolete */
853 __BCH_INODE_I_SECTORS_DIRTY = 6, /* obsolete */
854 __BCH_INODE_UNLINKED = 7,
855 __BCH_INODE_BACKPTR_UNTRUSTED = 8,
856
857 /* bits 20+ reserved for packed fields below: */
858};
859
860#define BCH_INODE_SYNC (1 << __BCH_INODE_SYNC)
861#define BCH_INODE_IMMUTABLE (1 << __BCH_INODE_IMMUTABLE)
862#define BCH_INODE_APPEND (1 << __BCH_INODE_APPEND)
863#define BCH_INODE_NODUMP (1 << __BCH_INODE_NODUMP)
864#define BCH_INODE_NOATIME (1 << __BCH_INODE_NOATIME)
865#define BCH_INODE_I_SIZE_DIRTY (1 << __BCH_INODE_I_SIZE_DIRTY)
866#define BCH_INODE_I_SECTORS_DIRTY (1 << __BCH_INODE_I_SECTORS_DIRTY)
867#define BCH_INODE_UNLINKED (1 << __BCH_INODE_UNLINKED)
868#define BCH_INODE_BACKPTR_UNTRUSTED (1 << __BCH_INODE_BACKPTR_UNTRUSTED)
869
870LE32_BITMASK(INODE_STR_HASH, struct bch_inode, bi_flags, 20, 24);
871LE32_BITMASK(INODE_NR_FIELDS, struct bch_inode, bi_flags, 24, 31);
872LE32_BITMASK(INODE_NEW_VARINT, struct bch_inode, bi_flags, 31, 32);
873
874LE64_BITMASK(INODEv2_STR_HASH, struct bch_inode_v2, bi_flags, 20, 24);
875LE64_BITMASK(INODEv2_NR_FIELDS, struct bch_inode_v2, bi_flags, 24, 31);
876
877LE64_BITMASK(INODEv3_STR_HASH, struct bch_inode_v3, bi_flags, 20, 24);
878LE64_BITMASK(INODEv3_NR_FIELDS, struct bch_inode_v3, bi_flags, 24, 31);
879
880LE64_BITMASK(INODEv3_FIELDS_START,
881 struct bch_inode_v3, bi_flags, 31, 36);
882LE64_BITMASK(INODEv3_MODE, struct bch_inode_v3, bi_flags, 36, 52);
883
884/* Dirents */
885
886/*
887 * Dirents (and xattrs) have to implement string lookups; since our b-tree
888 * doesn't support arbitrary length strings for the key, we instead index by a
889 * 64 bit hash (currently truncated sha1) of the string, stored in the offset
890 * field of the key - using linear probing to resolve hash collisions. This also
891 * provides us with the readdir cookie posix requires.
892 *
893 * Linear probing requires us to use whiteouts for deletions, in the event of a
894 * collision:
895 */
896
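/*
 * Editor's sketch, with a hypothetical name_hash() standing in for the
 * configurable string hash: a dirent for "foo" in directory inode 'dir' is
 * keyed at POS(dir, name_hash("foo")), and a lookup probes forward from that
 * position until it finds the matching name or an empty slot.
 */
#if 0
	struct bpos search = POS(dir, name_hash("foo"));
#endif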
897struct bch_dirent {
898 struct bch_val v;
899
900 /* Target inode number: */
901 union {
902 __le64 d_inum;
903 struct { /* DT_SUBVOL */
904 __le32 d_child_subvol;
905 __le32 d_parent_subvol;
906 };
907 };
908
909 /*
910 * Copy of mode bits 12-15 from the target inode - so userspace can get
911 * the filetype without having to do a stat()
912 */
913 __u8 d_type;
914
915 __u8 d_name[];
916} __packed __aligned(8);
917
918#define DT_SUBVOL 16
919#define BCH_DT_MAX 17
920
921#define BCH_NAME_MAX 512
922
923/* Xattrs */
924
925#define KEY_TYPE_XATTR_INDEX_USER 0
926#define KEY_TYPE_XATTR_INDEX_POSIX_ACL_ACCESS 1
927#define KEY_TYPE_XATTR_INDEX_POSIX_ACL_DEFAULT 2
928#define KEY_TYPE_XATTR_INDEX_TRUSTED 3
929#define KEY_TYPE_XATTR_INDEX_SECURITY 4
930
931struct bch_xattr {
932 struct bch_val v;
933 __u8 x_type;
934 __u8 x_name_len;
935 __le16 x_val_len;
936 __u8 x_name[];
937} __packed __aligned(8);
938
939/* Bucket/allocation information: */
940
941struct bch_alloc {
942 struct bch_val v;
943 __u8 fields;
944 __u8 gen;
945 __u8 data[];
946} __packed __aligned(8);
947
948#define BCH_ALLOC_FIELDS_V1() \
949 x(read_time, 16) \
950 x(write_time, 16) \
951 x(data_type, 8) \
952 x(dirty_sectors, 16) \
953 x(cached_sectors, 16) \
954 x(oldest_gen, 8) \
955 x(stripe, 32) \
956 x(stripe_redundancy, 8)
957
958enum {
959#define x(name, _bits) BCH_ALLOC_FIELD_V1_##name,
960 BCH_ALLOC_FIELDS_V1()
961#undef x
962};
963
964struct bch_alloc_v2 {
965 struct bch_val v;
966 __u8 nr_fields;
967 __u8 gen;
968 __u8 oldest_gen;
969 __u8 data_type;
970 __u8 data[];
971} __packed __aligned(8);
972
973#define BCH_ALLOC_FIELDS_V2() \
974 x(read_time, 64) \
975 x(write_time, 64) \
976 x(dirty_sectors, 32) \
977 x(cached_sectors, 32) \
978 x(stripe, 32) \
979 x(stripe_redundancy, 8)
980
981struct bch_alloc_v3 {
982 struct bch_val v;
983 __le64 journal_seq;
984 __le32 flags;
985 __u8 nr_fields;
986 __u8 gen;
987 __u8 oldest_gen;
988 __u8 data_type;
989 __u8 data[];
990} __packed __aligned(8);
991
992LE32_BITMASK(BCH_ALLOC_V3_NEED_DISCARD,struct bch_alloc_v3, flags, 0, 1)
993LE32_BITMASK(BCH_ALLOC_V3_NEED_INC_GEN,struct bch_alloc_v3, flags, 1, 2)
994
995struct bch_alloc_v4 {
996 struct bch_val v;
997 __u64 journal_seq;
998 __u32 flags;
999 __u8 gen;
1000 __u8 oldest_gen;
1001 __u8 data_type;
1002 __u8 stripe_redundancy;
1003 __u32 dirty_sectors;
1004 __u32 cached_sectors;
1005 __u64 io_time[2];
1006 __u32 stripe;
1007 __u32 nr_external_backpointers;
1008 __u64 fragmentation_lru;
1009} __packed __aligned(8);
1010
1011#define BCH_ALLOC_V4_U64s_V0 6
1012#define BCH_ALLOC_V4_U64s (sizeof(struct bch_alloc_v4) / sizeof(__u64))
1013
1014BITMASK(BCH_ALLOC_V4_NEED_DISCARD, struct bch_alloc_v4, flags, 0, 1)
1015BITMASK(BCH_ALLOC_V4_NEED_INC_GEN, struct bch_alloc_v4, flags, 1, 2)
1016BITMASK(BCH_ALLOC_V4_BACKPOINTERS_START,struct bch_alloc_v4, flags, 2, 8)
1017BITMASK(BCH_ALLOC_V4_NR_BACKPOINTERS, struct bch_alloc_v4, flags, 8, 14)
1018
1019#define BCH_ALLOC_V4_NR_BACKPOINTERS_MAX 40
1020
1021struct bch_backpointer {
1022 struct bch_val v;
1023 __u8 btree_id;
1024 __u8 level;
1025 __u8 data_type;
1026 __u64 bucket_offset:40;
1027 __u32 bucket_len;
1028 struct bpos pos;
1029} __packed __aligned(8);
1030
1031#define KEY_TYPE_BUCKET_GENS_BITS 8
1032#define KEY_TYPE_BUCKET_GENS_NR (1U << KEY_TYPE_BUCKET_GENS_BITS)
1033#define KEY_TYPE_BUCKET_GENS_MASK (KEY_TYPE_BUCKET_GENS_NR - 1)
1034
1035struct bch_bucket_gens {
1036 struct bch_val v;
1037 u8 gens[KEY_TYPE_BUCKET_GENS_NR];
1038} __packed __aligned(8);
1039
1040/* Quotas: */
1041
1042enum quota_types {
1043 QTYP_USR = 0,
1044 QTYP_GRP = 1,
1045 QTYP_PRJ = 2,
1046 QTYP_NR = 3,
1047};
1048
1049enum quota_counters {
1050 Q_SPC = 0,
1051 Q_INO = 1,
1052 Q_COUNTERS = 2,
1053};
1054
1055struct bch_quota_counter {
1056 __le64 hardlimit;
1057 __le64 softlimit;
1058};
1059
1060struct bch_quota {
1061 struct bch_val v;
1062 struct bch_quota_counter c[Q_COUNTERS];
1063} __packed __aligned(8);
1064
1065/* Erasure coding */
1066
1067struct bch_stripe {
1068 struct bch_val v;
1069 __le16 sectors;
1070 __u8 algorithm;
1071 __u8 nr_blocks;
1072 __u8 nr_redundant;
1073
1074 __u8 csum_granularity_bits;
1075 __u8 csum_type;
1076 __u8 pad;
1077
1078 struct bch_extent_ptr ptrs[];
1079} __packed __aligned(8);
1080
1081/* Reflink: */
1082
1083struct bch_reflink_p {
1084 struct bch_val v;
1085 __le64 idx;
1086 /*
1087 * A reflink pointer might point to an indirect extent which is then
1088 * later split (by copygc or rebalance). If we only pointed to part of
1089 * the original indirect extent, and then one of the fragments is
1090 * outside the range we point to, we'd leak a refcount: so when creating
1091 * reflink pointers, we need to store pad values to remember the full
1092 * range we were taking a reference on.
1093 */
1094 __le32 front_pad;
1095 __le32 back_pad;
1096} __packed __aligned(8);
1097
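/*
 * Editor's sketch of the range the pads describe (see the reflink code for
 * the real arithmetic): a reflink pointer holds a reference on the indirect
 * extents covering [idx - front_pad, idx + size + back_pad) in the reflink
 * btree, where 'size' is the size field of the containing bkey. With 'p' a
 * hypothetical reflink_p key handle exposing .k and .v:
 */
#if 0
	__u64 ref_start = __le64_to_cpu(p.v->idx) - __le32_to_cpu(p.v->front_pad);
	__u64 ref_end   = __le64_to_cpu(p.v->idx) + p.k->size + __le32_to_cpu(p.v->back_pad);
#endif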
1098struct bch_reflink_v {
1099 struct bch_val v;
1100 __le64 refcount;
1101 union bch_extent_entry start[0];
1102 __u64 _data[];
1103} __packed __aligned(8);
1104
1105struct bch_indirect_inline_data {
1106 struct bch_val v;
1107 __le64 refcount;
1108 u8 data[];
1109};
1110
1111/* Inline data */
1112
1113struct bch_inline_data {
1114 struct bch_val v;
1115 u8 data[];
1116};
1117
1118/* Subvolumes: */
1119
1120#define SUBVOL_POS_MIN POS(0, 1)
1121#define SUBVOL_POS_MAX POS(0, S32_MAX)
1122#define BCACHEFS_ROOT_SUBVOL 1
1123
1124struct bch_subvolume {
1125 struct bch_val v;
1126 __le32 flags;
1127 __le32 snapshot;
1128 __le64 inode;
1129 /*
1130 * Snapshot subvolumes form a tree, separate from the snapshot nodes
1131 * tree - if this subvolume is a snapshot, this is the ID of the
1132 * subvolume it was created from:
1133 */
1134 __le32 parent;
1135 __le32 pad;
1136 bch_le128 otime;
1137};
1138
1139LE32_BITMASK(BCH_SUBVOLUME_RO, struct bch_subvolume, flags, 0, 1)
1140/*
1141 * We need to know whether a subvolume is a snapshot so we can know whether we
1142 * can delete it (or whether it should just be rm -rf'd)
1143 */
1144LE32_BITMASK(BCH_SUBVOLUME_SNAP, struct bch_subvolume, flags, 1, 2)
1145LE32_BITMASK(BCH_SUBVOLUME_UNLINKED, struct bch_subvolume, flags, 2, 3)
1146
1147/* Snapshots */
1148
1149struct bch_snapshot {
1150 struct bch_val v;
1151 __le32 flags;
1152 __le32 parent;
1153 __le32 children[2];
1154 __le32 subvol;
1155 /* corresponds to a bch_snapshot_tree in BTREE_ID_snapshot_trees */
1156 __le32 tree;
1157 __le32 depth;
1158 __le32 skip[3];
1159};
1160
1161LE32_BITMASK(BCH_SNAPSHOT_DELETED, struct bch_snapshot, flags, 0, 1)
1162
1163/* True if a subvolume points to this snapshot node: */
1164LE32_BITMASK(BCH_SNAPSHOT_SUBVOL, struct bch_snapshot, flags, 1, 2)
1165
1166/*
1167 * Snapshot trees:
1168 *
1169 * The snapshot_trees btree gives us a persistent identifier for each tree of
1170 * bch_snapshot nodes, and allows us to record and easily find the root/master
1171 * subvolume that other snapshots were created from:
1172 */
1173struct bch_snapshot_tree {
1174 struct bch_val v;
1175 __le32 master_subvol;
1176 __le32 root_snapshot;
1177};
1178
1179/* LRU btree: */
1180
1181struct bch_lru {
1182 struct bch_val v;
1183 __le64 idx;
1184} __packed __aligned(8);
1185
1186#define LRU_ID_STRIPES (1U << 16)
1187
1188/* Logged operations btree: */
1189
1190struct bch_logged_op_truncate {
1191 struct bch_val v;
1192 __le32 subvol;
1193 __le32 pad;
1194 __le64 inum;
1195 __le64 new_i_size;
1196};
1197
1198enum logged_op_finsert_state {
1199 LOGGED_OP_FINSERT_start,
1200 LOGGED_OP_FINSERT_shift_extents,
1201 LOGGED_OP_FINSERT_finish,
1202};
1203
1204struct bch_logged_op_finsert {
1205 struct bch_val v;
1206 __u8 state;
1207 __u8 pad[3];
1208 __le32 subvol;
1209 __le64 inum;
1210 __le64 dst_offset;
1211 __le64 src_offset;
1212 __le64 pos;
1213};
1214
1215/* Optional/variable size superblock sections: */
1216
1217struct bch_sb_field {
1218 __u64 _data[0];
1219 __le32 u64s;
1220 __le32 type;
1221};
1222
1223#define BCH_SB_FIELDS() \
1224 x(journal, 0) \
1225 x(members_v1, 1) \
1226 x(crypt, 2) \
1227 x(replicas_v0, 3) \
1228 x(quota, 4) \
1229 x(disk_groups, 5) \
1230 x(clean, 6) \
1231 x(replicas, 7) \
1232 x(journal_seq_blacklist, 8) \
1233 x(journal_v2, 9) \
1234 x(counters, 10) \
1235 x(members_v2, 11)
1236
1237enum bch_sb_field_type {
1238#define x(f, nr) BCH_SB_FIELD_##f = nr,
1239 BCH_SB_FIELDS()
1240#undef x
1241 BCH_SB_FIELD_NR
1242};
1243
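/*
 * Editor's sketch of how the variable length fields are walked (hypothetical
 * loop; the real iteration uses the vstructs.h helpers): fields are laid out
 * back to back after struct bch_sb, each field's u64s counting the whole
 * field, header included, while sb->u64s counts the entire fields area.
 */
#if 0
	struct bch_sb_field *f = sb->start;

	while ((void *) f < (void *) sb->start + __le32_to_cpu(sb->u64s) * sizeof(__u64)) {
		/* __le32_to_cpu(f->type) is an enum bch_sb_field_type */
		f = (void *) f + __le32_to_cpu(f->u64s) * sizeof(__u64);
	}
#endif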
1244/*
1245 * Most superblock fields are replicated in all devices' superblocks - a few are
1246 * not:
1247 */
1248#define BCH_SINGLE_DEVICE_SB_FIELDS \
1249 ((1U << BCH_SB_FIELD_journal)| \
1250 (1U << BCH_SB_FIELD_journal_v2))
1251
1252/* BCH_SB_FIELD_journal: */
1253
1254struct bch_sb_field_journal {
1255 struct bch_sb_field field;
1256 __le64 buckets[];
1257};
1258
1259struct bch_sb_field_journal_v2 {
1260 struct bch_sb_field field;
1261
1262 struct bch_sb_field_journal_v2_entry {
1263 __le64 start;
1264 __le64 nr;
1265 } d[];
1266};
1267
1268/* BCH_SB_FIELD_members_v1: */
1269
1270#define BCH_MIN_NR_NBUCKETS (1 << 6)
1271
1272#define BCH_IOPS_MEASUREMENTS() \
1273 x(seqread, 0) \
1274 x(seqwrite, 1) \
1275 x(randread, 2) \
1276 x(randwrite, 3)
1277
1278enum bch_iops_measurement {
1279#define x(t, n) BCH_IOPS_##t = n,
1280 BCH_IOPS_MEASUREMENTS()
1281#undef x
1282 BCH_IOPS_NR
1283};
1284
1285struct bch_member {
1286 __uuid_t uuid;
1287 __le64 nbuckets; /* device size */
1288 __le16 first_bucket; /* index of first bucket used */
1289 __le16 bucket_size; /* sectors */
1290 __le32 pad;
1291 __le64 last_mount; /* time_t */
1292
1293 __le64 flags;
1294 __le32 iops[4];
1295};
1296
1297#define BCH_MEMBER_V1_BYTES 56
1298
1299LE64_BITMASK(BCH_MEMBER_STATE, struct bch_member, flags, 0, 4)
1300/* 4-14 unused, was TIER, HAS_(META)DATA, REPLACEMENT */
1301LE64_BITMASK(BCH_MEMBER_DISCARD, struct bch_member, flags, 14, 15)
1302LE64_BITMASK(BCH_MEMBER_DATA_ALLOWED, struct bch_member, flags, 15, 20)
1303LE64_BITMASK(BCH_MEMBER_GROUP, struct bch_member, flags, 20, 28)
1304LE64_BITMASK(BCH_MEMBER_DURABILITY, struct bch_member, flags, 28, 30)
1305LE64_BITMASK(BCH_MEMBER_FREESPACE_INITIALIZED,
1306 struct bch_member, flags, 30, 31)
1307
1308#if 0
1309LE64_BITMASK(BCH_MEMBER_NR_READ_ERRORS, struct bch_member, flags[1], 0, 20);
1310LE64_BITMASK(BCH_MEMBER_NR_WRITE_ERRORS,struct bch_member, flags[1], 20, 40);
1311#endif
1312
1313#define BCH_MEMBER_STATES() \
1314 x(rw, 0) \
1315 x(ro, 1) \
1316 x(failed, 2) \
1317 x(spare, 3)
1318
1319enum bch_member_state {
1320#define x(t, n) BCH_MEMBER_STATE_##t = n,
1321 BCH_MEMBER_STATES()
1322#undef x
1323 BCH_MEMBER_STATE_NR
1324};
1325
1326struct bch_sb_field_members_v1 {
1327 struct bch_sb_field field;
1328 struct bch_member _members[]; //Members are now variable size
1329};
1330
1331struct bch_sb_field_members_v2 {
1332 struct bch_sb_field field;
1333 __le16 member_bytes; //size of single member entry
1334 u8 pad[6];
1335 struct bch_member _members[];
1336};
1337
1338/* BCH_SB_FIELD_crypt: */
1339
1340struct nonce {
1341 __le32 d[4];
1342};
1343
1344struct bch_key {
1345 __le64 key[4];
1346};
1347
1348#define BCH_KEY_MAGIC \
1349 (((__u64) 'b' << 0)|((__u64) 'c' << 8)| \
1350 ((__u64) 'h' << 16)|((__u64) '*' << 24)| \
1351 ((__u64) '*' << 32)|((__u64) 'k' << 40)| \
1352 ((__u64) 'e' << 48)|((__u64) 'y' << 56))
1353
1354struct bch_encrypted_key {
1355 __le64 magic;
1356 struct bch_key key;
1357};
1358
1359/*
1360 * If this field is present in the superblock, it stores an encryption key which
1361 * is used to encrypt all other data/metadata. The key will normally be encrypted
1362 * with the key userspace provides, but if encryption has been turned off we'll
1363 * just store the master key unencrypted in the superblock so we can access the
1364 * previously encrypted data.
1365 */
1366struct bch_sb_field_crypt {
1367 struct bch_sb_field field;
1368
1369 __le64 flags;
1370 __le64 kdf_flags;
1371 struct bch_encrypted_key key;
1372};
1373
1374LE64_BITMASK(BCH_CRYPT_KDF_TYPE, struct bch_sb_field_crypt, flags, 0, 4);
1375
1376enum bch_kdf_types {
1377 BCH_KDF_SCRYPT = 0,
1378 BCH_KDF_NR = 1,
1379};
1380
1381/* stored as base 2 log of scrypt params: */
1382LE64_BITMASK(BCH_KDF_SCRYPT_N, struct bch_sb_field_crypt, kdf_flags, 0, 16);
1383LE64_BITMASK(BCH_KDF_SCRYPT_R, struct bch_sb_field_crypt, kdf_flags, 16, 32);
1384LE64_BITMASK(BCH_KDF_SCRYPT_P, struct bch_sb_field_crypt, kdf_flags, 32, 48);
1385
1386/* BCH_SB_FIELD_replicas: */
1387
1388#define BCH_DATA_TYPES() \
1389 x(free, 0) \
1390 x(sb, 1) \
1391 x(journal, 2) \
1392 x(btree, 3) \
1393 x(user, 4) \
1394 x(cached, 5) \
1395 x(parity, 6) \
1396 x(stripe, 7) \
1397 x(need_gc_gens, 8) \
1398 x(need_discard, 9)
1399
1400enum bch_data_type {
1401#define x(t, n) BCH_DATA_##t,
1402 BCH_DATA_TYPES()
1403#undef x
1404 BCH_DATA_NR
1405};
1406
1407static inline bool data_type_is_empty(enum bch_data_type type)
1408{
1409 switch (type) {
1410 case BCH_DATA_free:
1411 case BCH_DATA_need_gc_gens:
1412 case BCH_DATA_need_discard:
1413 return true;
1414 default:
1415 return false;
1416 }
1417}
1418
1419static inline bool data_type_is_hidden(enum bch_data_type type)
1420{
1421 switch (type) {
1422 case BCH_DATA_sb:
1423 case BCH_DATA_journal:
1424 return true;
1425 default:
1426 return false;
1427 }
1428}
1429
1430struct bch_replicas_entry_v0 {
1431 __u8 data_type;
1432 __u8 nr_devs;
1433 __u8 devs[];
1434} __packed;
1435
1436struct bch_sb_field_replicas_v0 {
1437 struct bch_sb_field field;
1438 struct bch_replicas_entry_v0 entries[];
1439} __packed __aligned(8);
1440
1441struct bch_replicas_entry {
1442 __u8 data_type;
1443 __u8 nr_devs;
1444 __u8 nr_required;
1445 __u8 devs[];
1446} __packed;
1447
1448#define replicas_entry_bytes(_i) \
1449 (offsetof(typeof(*(_i)), devs) + (_i)->nr_devs)
1450
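/*
 * Editor's sketch: replicas entries are variable length, so walking a
 * bch_sb_field_replicas means advancing by replicas_entry_bytes() each time
 * (hypothetical loop; the real iterator lives with the replicas code):
 */
#if 0
	struct bch_replicas_entry *e;

	for (e = r->entries;
	     (void *) e < vstruct_end(&r->field);
	     e = (void *) e + replicas_entry_bytes(e))
		/* e->data_type, e->nr_devs, e->devs[] describe one replicas set */;
#endif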
1451struct bch_sb_field_replicas {
1452 struct bch_sb_field field;
1453 struct bch_replicas_entry entries[];
1454} __packed __aligned(8);
1455
1456/* BCH_SB_FIELD_quota: */
1457
1458struct bch_sb_quota_counter {
1459 __le32 timelimit;
1460 __le32 warnlimit;
1461};
1462
1463struct bch_sb_quota_type {
1464 __le64 flags;
1465 struct bch_sb_quota_counter c[Q_COUNTERS];
1466};
1467
1468struct bch_sb_field_quota {
1469 struct bch_sb_field field;
1470 struct bch_sb_quota_type q[QTYP_NR];
1471} __packed __aligned(8);
1472
1473/* BCH_SB_FIELD_disk_groups: */
1474
1475#define BCH_SB_LABEL_SIZE 32
1476
1477struct bch_disk_group {
1478 __u8 label[BCH_SB_LABEL_SIZE];
1479 __le64 flags[2];
1480} __packed __aligned(8);
1481
1482LE64_BITMASK(BCH_GROUP_DELETED, struct bch_disk_group, flags[0], 0, 1)
1483LE64_BITMASK(BCH_GROUP_DATA_ALLOWED, struct bch_disk_group, flags[0], 1, 6)
1484LE64_BITMASK(BCH_GROUP_PARENT, struct bch_disk_group, flags[0], 6, 24)
1485
1486struct bch_sb_field_disk_groups {
1487 struct bch_sb_field field;
1488 struct bch_disk_group entries[];
1489} __packed __aligned(8);
1490
1491/* BCH_SB_FIELD_counters */
1492
1493#define BCH_PERSISTENT_COUNTERS() \
1494 x(io_read, 0) \
1495 x(io_write, 1) \
1496 x(io_move, 2) \
1497 x(bucket_invalidate, 3) \
1498 x(bucket_discard, 4) \
1499 x(bucket_alloc, 5) \
1500 x(bucket_alloc_fail, 6) \
1501 x(btree_cache_scan, 7) \
1502 x(btree_cache_reap, 8) \
1503 x(btree_cache_cannibalize, 9) \
1504 x(btree_cache_cannibalize_lock, 10) \
1505 x(btree_cache_cannibalize_lock_fail, 11) \
1506 x(btree_cache_cannibalize_unlock, 12) \
1507 x(btree_node_write, 13) \
1508 x(btree_node_read, 14) \
1509 x(btree_node_compact, 15) \
1510 x(btree_node_merge, 16) \
1511 x(btree_node_split, 17) \
1512 x(btree_node_rewrite, 18) \
1513 x(btree_node_alloc, 19) \
1514 x(btree_node_free, 20) \
1515 x(btree_node_set_root, 21) \
1516 x(btree_path_relock_fail, 22) \
1517 x(btree_path_upgrade_fail, 23) \
1518 x(btree_reserve_get_fail, 24) \
1519 x(journal_entry_full, 25) \
1520 x(journal_full, 26) \
1521 x(journal_reclaim_finish, 27) \
1522 x(journal_reclaim_start, 28) \
1523 x(journal_write, 29) \
1524 x(read_promote, 30) \
1525 x(read_bounce, 31) \
1526 x(read_split, 33) \
1527 x(read_retry, 32) \
1528 x(read_reuse_race, 34) \
1529 x(move_extent_read, 35) \
1530 x(move_extent_write, 36) \
1531 x(move_extent_finish, 37) \
1532 x(move_extent_fail, 38) \
1533 x(move_extent_alloc_mem_fail, 39) \
1534 x(copygc, 40) \
1535 x(copygc_wait, 41) \
1536 x(gc_gens_end, 42) \
1537 x(gc_gens_start, 43) \
1538 x(trans_blocked_journal_reclaim, 44) \
1539 x(trans_restart_btree_node_reused, 45) \
1540 x(trans_restart_btree_node_split, 46) \
1541 x(trans_restart_fault_inject, 47) \
1542 x(trans_restart_iter_upgrade, 48) \
1543 x(trans_restart_journal_preres_get, 49) \
1544 x(trans_restart_journal_reclaim, 50) \
1545 x(trans_restart_journal_res_get, 51) \
1546 x(trans_restart_key_cache_key_realloced, 52) \
1547 x(trans_restart_key_cache_raced, 53) \
1548 x(trans_restart_mark_replicas, 54) \
1549 x(trans_restart_mem_realloced, 55) \
1550 x(trans_restart_memory_allocation_failure, 56) \
1551 x(trans_restart_relock, 57) \
1552 x(trans_restart_relock_after_fill, 58) \
1553 x(trans_restart_relock_key_cache_fill, 59) \
1554 x(trans_restart_relock_next_node, 60) \
1555 x(trans_restart_relock_parent_for_fill, 61) \
1556 x(trans_restart_relock_path, 62) \
1557 x(trans_restart_relock_path_intent, 63) \
1558 x(trans_restart_too_many_iters, 64) \
1559 x(trans_restart_traverse, 65) \
1560 x(trans_restart_upgrade, 66) \
1561 x(trans_restart_would_deadlock, 67) \
1562 x(trans_restart_would_deadlock_write, 68) \
1563 x(trans_restart_injected, 69) \
1564 x(trans_restart_key_cache_upgrade, 70) \
1565 x(trans_traverse_all, 71) \
1566 x(transaction_commit, 72) \
1567 x(write_super, 73) \
1568 x(trans_restart_would_deadlock_recursion_limit, 74) \
1569 x(trans_restart_write_buffer_flush, 75) \
1570 x(trans_restart_split_race, 76)
1571
1572enum bch_persistent_counters {
1573#define x(t, n, ...) BCH_COUNTER_##t,
1574 BCH_PERSISTENT_COUNTERS()
1575#undef x
1576 BCH_COUNTER_NR
1577};
1578
1579struct bch_sb_field_counters {
1580 struct bch_sb_field field;
1581 __le64 d[];
1582};
1583
1584/*
1585 * On clean shutdown, store btree roots and current journal sequence number in
1586 * the superblock:
1587 */
1588struct jset_entry {
1589 __le16 u64s;
1590 __u8 btree_id;
1591 __u8 level;
1592 __u8 type; /* designates what this jset holds */
1593 __u8 pad[3];
1594
1595 struct bkey_i start[0];
1596 __u64 _data[];
1597};
1598
1599struct bch_sb_field_clean {
1600 struct bch_sb_field field;
1601
1602 __le32 flags;
1603 __le16 _read_clock; /* no longer used */
1604 __le16 _write_clock;
1605 __le64 journal_seq;
1606
1607 struct jset_entry start[0];
1608 __u64 _data[];
1609};
1610
1611struct journal_seq_blacklist_entry {
1612 __le64 start;
1613 __le64 end;
1614};
1615
1616struct bch_sb_field_journal_seq_blacklist {
1617 struct bch_sb_field field;
1618
1619 struct journal_seq_blacklist_entry start[0];
1620 __u64 _data[];
1621};
1622
1623/* Superblock: */
1624
1625/*
1626 * New versioning scheme:
1627 * One common version number for all on disk data structures - superblock, btree
1628 * nodes, journal entries
1629 */
1630#define BCH_VERSION_MAJOR(_v) ((__u16) ((_v) >> 10))
1631#define BCH_VERSION_MINOR(_v) ((__u16) ((_v) & ~(~0U << 10)))
1632#define BCH_VERSION(_major, _minor) (((_major) << 10)|(_minor) << 0)
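/*
 * Worked example: BCH_VERSION(1, 2) == (1 << 10) | 2 == 1026, so
 * BCH_VERSION_MAJOR(1026) == 1 and BCH_VERSION_MINOR(1026) == 2; the minor
 * number occupies the low 10 bits.
 */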
1633
1634#define RECOVERY_PASS_ALL_FSCK (1ULL << 63)
1635
1636#define BCH_METADATA_VERSIONS() \
1637 x(bkey_renumber, BCH_VERSION(0, 10), \
1638 RECOVERY_PASS_ALL_FSCK) \
1639 x(inode_btree_change, BCH_VERSION(0, 11), \
1640 RECOVERY_PASS_ALL_FSCK) \
1641 x(snapshot, BCH_VERSION(0, 12), \
1642 RECOVERY_PASS_ALL_FSCK) \
1643 x(inode_backpointers, BCH_VERSION(0, 13), \
1644 RECOVERY_PASS_ALL_FSCK) \
1645 x(btree_ptr_sectors_written, BCH_VERSION(0, 14), \
1646 RECOVERY_PASS_ALL_FSCK) \
1647 x(snapshot_2, BCH_VERSION(0, 15), \
1648 BIT_ULL(BCH_RECOVERY_PASS_fs_upgrade_for_subvolumes)| \
1649 BIT_ULL(BCH_RECOVERY_PASS_initialize_subvolumes)| \
1650 RECOVERY_PASS_ALL_FSCK) \
1651 x(reflink_p_fix, BCH_VERSION(0, 16), \
1652 BIT_ULL(BCH_RECOVERY_PASS_fix_reflink_p)) \
1653 x(subvol_dirent, BCH_VERSION(0, 17), \
1654 RECOVERY_PASS_ALL_FSCK) \
1655 x(inode_v2, BCH_VERSION(0, 18), \
1656 RECOVERY_PASS_ALL_FSCK) \
1657 x(freespace, BCH_VERSION(0, 19), \
1658 RECOVERY_PASS_ALL_FSCK) \
1659 x(alloc_v4, BCH_VERSION(0, 20), \
1660 RECOVERY_PASS_ALL_FSCK) \
1661 x(new_data_types, BCH_VERSION(0, 21), \
1662 RECOVERY_PASS_ALL_FSCK) \
1663 x(backpointers, BCH_VERSION(0, 22), \
1664 RECOVERY_PASS_ALL_FSCK) \
1665 x(inode_v3, BCH_VERSION(0, 23), \
1666 RECOVERY_PASS_ALL_FSCK) \
1667 x(unwritten_extents, BCH_VERSION(0, 24), \
1668 RECOVERY_PASS_ALL_FSCK) \
1669 x(bucket_gens, BCH_VERSION(0, 25), \
1670 BIT_ULL(BCH_RECOVERY_PASS_bucket_gens_init)| \
1671 RECOVERY_PASS_ALL_FSCK) \
1672 x(lru_v2, BCH_VERSION(0, 26), \
1673 RECOVERY_PASS_ALL_FSCK) \
1674 x(fragmentation_lru, BCH_VERSION(0, 27), \
1675 RECOVERY_PASS_ALL_FSCK) \
1676 x(no_bps_in_alloc_keys, BCH_VERSION(0, 28), \
1677 RECOVERY_PASS_ALL_FSCK) \
1678 x(snapshot_trees, BCH_VERSION(0, 29), \
1679 RECOVERY_PASS_ALL_FSCK) \
1680 x(major_minor, BCH_VERSION(1, 0), \
1681 0) \
1682 x(snapshot_skiplists, BCH_VERSION(1, 1), \
1683 BIT_ULL(BCH_RECOVERY_PASS_check_snapshots)) \
1684 x(deleted_inodes, BCH_VERSION(1, 2), \
1685 BIT_ULL(BCH_RECOVERY_PASS_check_inodes))
1686
1687enum bcachefs_metadata_version {
1688 bcachefs_metadata_version_min = 9,
1689#define x(t, n, upgrade_passes) bcachefs_metadata_version_##t = n,
1690 BCH_METADATA_VERSIONS()
1691#undef x
1692 bcachefs_metadata_version_max
26609b61 1693};
1c6fdbd8 1694
96dea3d5
KO
1695static const __maybe_unused
1696unsigned bcachefs_metadata_required_upgrade_below = bcachefs_metadata_version_major_minor;
1c59b483 1697
26609b61 1698#define bcachefs_metadata_version_current (bcachefs_metadata_version_max - 1)
1c6fdbd8
KO
1699
1700#define BCH_SB_SECTOR 8
1701#define BCH_SB_MEMBERS_MAX 64 /* XXX kill */
1702
1703struct bch_sb_layout {
1704 __uuid_t magic; /* bcachefs superblock UUID */
1705 __u8 layout_type;
1706 __u8 sb_max_size_bits; /* base 2 of 512 byte sectors */
1707 __u8 nr_superblocks;
1708 __u8 pad[5];
1709 __le64 sb_offset[61];
1710} __packed __aligned(8);
1711
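/*
 * Worked example: with sb_max_size_bits == 9 each superblock may grow to
 * 1 << 9 == 512 sectors (256K), and sb_offset[0..nr_superblocks-1] give the
 * starting sector of each copy, which is how backup superblocks are located.
 */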
1712#define BCH_SB_LAYOUT_SECTOR 7
1713
1714/*
1715 * @offset - sector where this sb was written
1716 * @version - on disk format version
1717 * @version_min - Oldest metadata version this filesystem contains; so we can
1718 * safely drop compatibility code and refuse to mount filesystems
1719 * we'd need it for
1720 * @magic - identifies as a bcachefs superblock (BCHFS_MAGIC)
1721 * @seq - incremented each time superblock is written
1722 * @uuid - used for generating various magic numbers and identifying
1723 * member devices, never changes
1724 * @user_uuid - user visible UUID, may be changed
1725 * @label - filesystem label
1726 * @seq - identifies most recent superblock, incremented each time
1727 * superblock is written
1728 * @features - enabled incompatible features
1729 */
1730struct bch_sb {
1731 struct bch_csum csum;
1732 __le16 version;
1733 __le16 version_min;
1734 __le16 pad[2];
1735 __uuid_t magic;
1736 __uuid_t uuid;
1737 __uuid_t user_uuid;
1738 __u8 label[BCH_SB_LABEL_SIZE];
1739 __le64 offset;
1740 __le64 seq;
1741
1742 __le16 block_size;
1743 __u8 dev_idx;
1744 __u8 nr_devices;
1745 __le32 u64s;
1746
1747 __le64 time_base_lo;
1748 __le32 time_base_hi;
1749 __le32 time_precision;
1750
1751 __le64 flags[8];
1752 __le64 features[2];
1753 __le64 compat[2];
1754
1755 struct bch_sb_layout layout;
1756
1757 struct bch_sb_field start[0];
1758 __le64 _data[];
1759} __packed __aligned(8);
1760
1761/*
1762 * Flags:
1763 * BCH_SB_INITIALIZED - set on first mount
1764 * BCH_SB_CLEAN - did we shut down cleanly? Just a hint, doesn't affect
1765 * behaviour of mount/recovery path:
1766 * BCH_SB_INODE_32BIT - limit inode numbers to 32 bits
1767 * BCH_SB_128_BIT_MACS - 128 bit macs instead of 80
1768 * BCH_SB_ENCRYPTION_TYPE - if nonzero encryption is enabled; overrides
1769 * DATA/META_CSUM_TYPE. Also indicates encryption
1770 * algorithm in use, if/when we get more than one
1771 */
1772
1773LE16_BITMASK(BCH_SB_BLOCK_SIZE, struct bch_sb, block_size, 0, 16);
1774
1775LE64_BITMASK(BCH_SB_INITIALIZED, struct bch_sb, flags[0], 0, 1);
1776LE64_BITMASK(BCH_SB_CLEAN, struct bch_sb, flags[0], 1, 2);
1777LE64_BITMASK(BCH_SB_CSUM_TYPE, struct bch_sb, flags[0], 2, 8);
1778LE64_BITMASK(BCH_SB_ERROR_ACTION, struct bch_sb, flags[0], 8, 12);
1779
1780LE64_BITMASK(BCH_SB_BTREE_NODE_SIZE, struct bch_sb, flags[0], 12, 28);
1781
1782LE64_BITMASK(BCH_SB_GC_RESERVE, struct bch_sb, flags[0], 28, 33);
1783LE64_BITMASK(BCH_SB_ROOT_RESERVE, struct bch_sb, flags[0], 33, 40);
1784
1785LE64_BITMASK(BCH_SB_META_CSUM_TYPE, struct bch_sb, flags[0], 40, 44);
1786LE64_BITMASK(BCH_SB_DATA_CSUM_TYPE, struct bch_sb, flags[0], 44, 48);
1787
1788LE64_BITMASK(BCH_SB_META_REPLICAS_WANT, struct bch_sb, flags[0], 48, 52);
1789LE64_BITMASK(BCH_SB_DATA_REPLICAS_WANT, struct bch_sb, flags[0], 52, 56);
1790
1791LE64_BITMASK(BCH_SB_POSIX_ACL, struct bch_sb, flags[0], 56, 57);
1792LE64_BITMASK(BCH_SB_USRQUOTA, struct bch_sb, flags[0], 57, 58);
1793LE64_BITMASK(BCH_SB_GRPQUOTA, struct bch_sb, flags[0], 58, 59);
1794LE64_BITMASK(BCH_SB_PRJQUOTA, struct bch_sb, flags[0], 59, 60);
1795
1796LE64_BITMASK(BCH_SB_HAS_ERRORS, struct bch_sb, flags[0], 60, 61);
1797LE64_BITMASK(BCH_SB_HAS_TOPOLOGY_ERRORS,struct bch_sb, flags[0], 61, 62);
1798
1799LE64_BITMASK(BCH_SB_BIG_ENDIAN, struct bch_sb, flags[0], 62, 63);
1800
1801LE64_BITMASK(BCH_SB_STR_HASH_TYPE, struct bch_sb, flags[1], 0, 4);
1802LE64_BITMASK(BCH_SB_COMPRESSION_TYPE_LO,struct bch_sb, flags[1], 4, 8);
1803LE64_BITMASK(BCH_SB_INODE_32BIT, struct bch_sb, flags[1], 8, 9);
1804
1805LE64_BITMASK(BCH_SB_128_BIT_MACS, struct bch_sb, flags[1], 9, 10);
1806LE64_BITMASK(BCH_SB_ENCRYPTION_TYPE, struct bch_sb, flags[1], 10, 14);
1807
1808/*
1809 * Max size of an extent that may require bouncing to read or write
1810 * (checksummed, compressed): 64k
1811 */
1812LE64_BITMASK(BCH_SB_ENCODED_EXTENT_MAX_BITS,
1813 struct bch_sb, flags[1], 14, 20);
1814
1815LE64_BITMASK(BCH_SB_META_REPLICAS_REQ, struct bch_sb, flags[1], 20, 24);
1816LE64_BITMASK(BCH_SB_DATA_REPLICAS_REQ, struct bch_sb, flags[1], 24, 28);
1817
1818LE64_BITMASK(BCH_SB_PROMOTE_TARGET, struct bch_sb, flags[1], 28, 40);
1819LE64_BITMASK(BCH_SB_FOREGROUND_TARGET, struct bch_sb, flags[1], 40, 52);
1820LE64_BITMASK(BCH_SB_BACKGROUND_TARGET, struct bch_sb, flags[1], 52, 64);
1821
1822LE64_BITMASK(BCH_SB_BACKGROUND_COMPRESSION_TYPE_LO,
1823 struct bch_sb, flags[2], 0, 4);
1824LE64_BITMASK(BCH_SB_GC_RESERVE_BYTES, struct bch_sb, flags[2], 4, 64);
1825
1826LE64_BITMASK(BCH_SB_ERASURE_CODE, struct bch_sb, flags[3], 0, 16);
1827LE64_BITMASK(BCH_SB_METADATA_TARGET, struct bch_sb, flags[3], 16, 28);
1828LE64_BITMASK(BCH_SB_SHARD_INUMS, struct bch_sb, flags[3], 28, 29);
1829LE64_BITMASK(BCH_SB_INODES_USE_KEY_CACHE,struct bch_sb, flags[3], 29, 30);
1830LE64_BITMASK(BCH_SB_JOURNAL_FLUSH_DELAY,struct bch_sb, flags[3], 30, 62);
1831LE64_BITMASK(BCH_SB_JOURNAL_FLUSH_DISABLED,struct bch_sb, flags[3], 62, 63);
1832LE64_BITMASK(BCH_SB_JOURNAL_RECLAIM_DELAY,struct bch_sb, flags[4], 0, 32);
fb64f3fd 1833LE64_BITMASK(BCH_SB_JOURNAL_TRANSACTION_NAMES,struct bch_sb, flags[4], 32, 33);
a8b3a677 1834LE64_BITMASK(BCH_SB_NOCOW, struct bch_sb, flags[4], 33, 34);
920e69bc 1835LE64_BITMASK(BCH_SB_WRITE_BUFFER_SIZE, struct bch_sb, flags[4], 34, 54);
3045bb95 1836LE64_BITMASK(BCH_SB_VERSION_UPGRADE, struct bch_sb, flags[4], 54, 56);
cd575ddf 1837
e86e9124
KO
1838LE64_BITMASK(BCH_SB_COMPRESSION_TYPE_HI,struct bch_sb, flags[4], 56, 60);
1839LE64_BITMASK(BCH_SB_BACKGROUND_COMPRESSION_TYPE_HI,
1840 struct bch_sb, flags[4], 60, 64);
24964e1c
KO
1841
1842LE64_BITMASK(BCH_SB_VERSION_UPGRADE_COMPLETE,
1843 struct bch_sb, flags[5], 0, 16);
1844
e86e9124
KO
1845static inline __u64 BCH_SB_COMPRESSION_TYPE(const struct bch_sb *sb)
1846{
1847 return BCH_SB_COMPRESSION_TYPE_LO(sb) | (BCH_SB_COMPRESSION_TYPE_HI(sb) << 4);
1848}
1849
1850static inline void SET_BCH_SB_COMPRESSION_TYPE(struct bch_sb *sb, __u64 v)
1851{
1852 SET_BCH_SB_COMPRESSION_TYPE_LO(sb, v);
1853 SET_BCH_SB_COMPRESSION_TYPE_HI(sb, v >> 4);
1854}
1855
1856static inline __u64 BCH_SB_BACKGROUND_COMPRESSION_TYPE(const struct bch_sb *sb)
1857{
1858 return BCH_SB_BACKGROUND_COMPRESSION_TYPE_LO(sb) |
1859 (BCH_SB_BACKGROUND_COMPRESSION_TYPE_HI(sb) << 4);
1860}
1861
1862static inline void SET_BCH_SB_BACKGROUND_COMPRESSION_TYPE(struct bch_sb *sb, __u64 v)
1863{
1864 SET_BCH_SB_BACKGROUND_COMPRESSION_TYPE_LO(sb, v);
1865 SET_BCH_SB_BACKGROUND_COMPRESSION_TYPE_HI(sb, v >> 4);
1866}
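
/*
 * Illustrative sketch, not part of the on-disk format (the helpers below are
 * hypothetical): each LE64_BITMASK() above expands to a read accessor and a
 * SET_ accessor for one bitfield within flags[], handling the endian
 * conversion, so superblock options are accessed like this:
 */
static inline _Bool bch2_sb_example_is_encrypted(const struct bch_sb *sb)
{
	/* Nonzero BCH_SB_ENCRYPTION_TYPE means encryption is enabled: */
	return BCH_SB_ENCRYPTION_TYPE(sb) != 0;
}

static inline void bch2_sb_example_mark_unclean(struct bch_sb *sb)
{
	/* SET_ accessors update only their field, preserving the other flags: */
	SET_BCH_SB_CLEAN(sb, false);
}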

/*
 * Features:
 *
 * journal_seq_blacklist_v3:	gates BCH_SB_FIELD_journal_seq_blacklist
 * reflink:			gates KEY_TYPE_reflink
 * inline_data:			gates KEY_TYPE_inline_data
 * new_siphash:			gates BCH_STR_HASH_siphash
 * new_extent_overwrite:	gates BTREE_NODE_NEW_EXTENT_OVERWRITE
 */
#define BCH_SB_FEATURES()			\
	x(lz4,				0)	\
	x(gzip,				1)	\
	x(zstd,				2)	\
	x(atomic_nlink,			3)	\
	x(ec,				4)	\
	x(journal_seq_blacklist_v3,	5)	\
	x(reflink,			6)	\
	x(new_siphash,			7)	\
	x(inline_data,			8)	\
	x(new_extent_overwrite,		9)	\
	x(incompressible,		10)	\
	x(btree_ptr_v2,			11)	\
	x(extents_above_btree_updates,	12)	\
	x(btree_updates_journalled,	13)	\
	x(reflink_inline_data,		14)	\
	x(new_varint,			15)	\
	x(journal_no_flush,		16)	\
	x(alloc_v2,			17)	\
	x(extents_across_btree_nodes,	18)

#define BCH_SB_FEATURES_ALWAYS				\
	((1ULL << BCH_FEATURE_new_extent_overwrite)|	\
	 (1ULL << BCH_FEATURE_extents_above_btree_updates)|\
	 (1ULL << BCH_FEATURE_btree_updates_journalled)|\
	 (1ULL << BCH_FEATURE_alloc_v2)|		\
	 (1ULL << BCH_FEATURE_extents_across_btree_nodes))

#define BCH_SB_FEATURES_ALL				\
	(BCH_SB_FEATURES_ALWAYS|			\
	 (1ULL << BCH_FEATURE_new_siphash)|		\
	 (1ULL << BCH_FEATURE_btree_ptr_v2)|		\
	 (1ULL << BCH_FEATURE_new_varint)|		\
	 (1ULL << BCH_FEATURE_journal_no_flush))

enum bch_sb_feature {
#define x(f, n) BCH_FEATURE_##f,
	BCH_SB_FEATURES()
#undef x
	BCH_FEATURE_NR,
};
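
/*
 * Illustrative sketch (hypothetical helper): a feature bit gates a newer
 * on-disk structure or key type (see the list above), and bch_sb->features[]
 * is just a bitmap indexed by enum bch_sb_feature, so testing one looks
 * like this:
 */
static inline _Bool bch2_sb_example_has_feature(const struct bch_sb *sb,
						enum bch_sb_feature f)
{
	return __le64_to_cpu(sb->features[f / 64]) & (1ULL << (f % 64));
}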

#define BCH_SB_COMPAT()					\
	x(alloc_info,				0)	\
	x(alloc_metadata,			1)	\
	x(extents_above_btree_updates_done,	2)	\
	x(bformat_overflow_done,		3)

enum bch_sb_compat {
#define x(f, n) BCH_COMPAT_##f,
	BCH_SB_COMPAT()
#undef x
	BCH_COMPAT_NR,
};

/* options: */

#define BCH_VERSION_UPGRADE_OPTS()	\
	x(compatible,		0)	\
	x(incompatible,		1)	\
	x(none,			2)

enum bch_version_upgrade_opts {
#define x(t, n) BCH_VERSION_UPGRADE_##t = n,
	BCH_VERSION_UPGRADE_OPTS()
#undef x
};

#define BCH_REPLICAS_MAX		4U

#define BCH_BKEY_PTRS_MAX		16U

#define BCH_ERROR_ACTIONS()		\
	x(continue,		0)	\
	x(ro,			1)	\
	x(panic,		2)

enum bch_error_actions {
#define x(t, n) BCH_ON_ERROR_##t = n,
	BCH_ERROR_ACTIONS()
#undef x
	BCH_ON_ERROR_NR
};

#define BCH_STR_HASH_TYPES()		\
	x(crc32c,		0)	\
	x(crc64,		1)	\
	x(siphash_old,		2)	\
	x(siphash,		3)

enum bch_str_hash_type {
#define x(t, n) BCH_STR_HASH_##t = n,
	BCH_STR_HASH_TYPES()
#undef x
	BCH_STR_HASH_NR
};

#define BCH_STR_HASH_OPTS()		\
	x(crc32c,		0)	\
	x(crc64,		1)	\
	x(siphash,		2)

enum bch_str_hash_opts {
#define x(t, n) BCH_STR_HASH_OPT_##t = n,
	BCH_STR_HASH_OPTS()
#undef x
	BCH_STR_HASH_OPT_NR
};

#define BCH_CSUM_TYPES()			\
	x(none,				0)	\
	x(crc32c_nonzero,		1)	\
	x(crc64_nonzero,		2)	\
	x(chacha20_poly1305_80,		3)	\
	x(chacha20_poly1305_128,	4)	\
	x(crc32c,			5)	\
	x(crc64,			6)	\
	x(xxhash,			7)

enum bch_csum_type {
#define x(t, n) BCH_CSUM_##t = n,
	BCH_CSUM_TYPES()
#undef x
	BCH_CSUM_NR
};

static const __maybe_unused unsigned bch_crc_bytes[] = {
	[BCH_CSUM_none]				= 0,
	[BCH_CSUM_crc32c_nonzero]		= 4,
	[BCH_CSUM_crc32c]			= 4,
	[BCH_CSUM_crc64_nonzero]		= 8,
	[BCH_CSUM_crc64]			= 8,
	[BCH_CSUM_xxhash]			= 8,
	[BCH_CSUM_chacha20_poly1305_80]		= 10,
	[BCH_CSUM_chacha20_poly1305_128]	= 16,
};

static inline _Bool bch2_csum_type_is_encryption(enum bch_csum_type type)
{
	switch (type) {
	case BCH_CSUM_chacha20_poly1305_80:
	case BCH_CSUM_chacha20_poly1305_128:
		return true;
	default:
		return false;
	}
}

#define BCH_CSUM_OPTS()			\
	x(none,			0)	\
	x(crc32c,		1)	\
	x(crc64,		2)	\
	x(xxhash,		3)

enum bch_csum_opts {
#define x(t, n) BCH_CSUM_OPT_##t = n,
	BCH_CSUM_OPTS()
#undef x
	BCH_CSUM_OPT_NR
};

#define BCH_COMPRESSION_TYPES()		\
	x(none,			0)	\
	x(lz4_old,		1)	\
	x(gzip,			2)	\
	x(lz4,			3)	\
	x(zstd,			4)	\
	x(incompressible,	5)

enum bch_compression_type {
#define x(t, n) BCH_COMPRESSION_TYPE_##t = n,
	BCH_COMPRESSION_TYPES()
#undef x
	BCH_COMPRESSION_TYPE_NR
};

#define BCH_COMPRESSION_OPTS()		\
	x(none,		0)	\
	x(lz4,		1)	\
	x(gzip,		2)	\
	x(zstd,		3)

enum bch_compression_opts {
#define x(t, n) BCH_COMPRESSION_OPT_##t = n,
	BCH_COMPRESSION_OPTS()
#undef x
	BCH_COMPRESSION_OPT_NR
};

/*
 * Magic numbers
 *
 * The various other data structures have their own magic numbers, which are
 * xored with the first part of the cache set's UUID
 */

#define BCACHE_MAGIC							\
	UUID_INIT(0xc68573f6, 0x4e1a, 0x45ca,				\
		  0x82, 0x65, 0xf5, 0x7f, 0x48, 0xba, 0x6d, 0x81)
#define BCHFS_MAGIC							\
	UUID_INIT(0xc68573f6, 0x66ce, 0x90a9,				\
		  0xd9, 0x6a, 0x60, 0xcf, 0x80, 0x3d, 0xf7, 0xef)

#define BCACHEFS_STATFS_MAGIC		0xca451a4e

#define JSET_MAGIC		__cpu_to_le64(0x245235c1a3625032ULL)
#define BSET_MAGIC		__cpu_to_le64(0x90135c78b99e07f5ULL)

static inline __le64 __bch2_sb_magic(struct bch_sb *sb)
{
	__le64 ret;

	memcpy(&ret, &sb->uuid, sizeof(ret));
	return ret;
}

static inline __u64 __jset_magic(struct bch_sb *sb)
{
	return __le64_to_cpu(__bch2_sb_magic(sb) ^ JSET_MAGIC);
}

static inline __u64 __bset_magic(struct bch_sb *sb)
{
	return __le64_to_cpu(__bch2_sb_magic(sb) ^ BSET_MAGIC);
}
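
/*
 * Illustrative sketch (the helper name is made up): because the jset/bset
 * magics are XORed with this filesystem's UUID, readers can compare a
 * structure's magic field against the value derived above and reject blocks
 * that belong to some other filesystem:
 */
static inline _Bool bch2_example_is_our_jset(struct bch_sb *sb, __le64 magic)
{
	return __le64_to_cpu(magic) == __jset_magic(sb);
}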

/* Journal */

#define JSET_KEYS_U64s	(sizeof(struct jset_entry) / sizeof(__u64))

#define BCH_JSET_ENTRY_TYPES()		\
	x(btree_keys,		0)	\
	x(btree_root,		1)	\
	x(prio_ptrs,		2)	\
	x(blacklist,		3)	\
	x(blacklist_v2,		4)	\
	x(usage,		5)	\
	x(data_usage,		6)	\
	x(clock,		7)	\
	x(dev_usage,		8)	\
	x(log,			9)	\
	x(overwrite,		10)

enum {
#define x(f, nr)	BCH_JSET_ENTRY_##f	= nr,
	BCH_JSET_ENTRY_TYPES()
#undef x
	BCH_JSET_ENTRY_NR
};

/*
 * Journal sequence numbers can be blacklisted: bsets record the max sequence
 * number of all the journal entries they contain updates for, so that on
 * recovery we can ignore those bsets that contain index updates newer than
 * what made it into the journal.
 *
 * This means that we can't reuse that journal_seq - we have to skip it, and
 * then record that we skipped it so that the next time we crash and recover we
 * don't think there was a missing journal entry.
 */
struct jset_entry_blacklist {
	struct jset_entry	entry;
	__le64			seq;
};

struct jset_entry_blacklist_v2 {
	struct jset_entry	entry;
	__le64			start;
	__le64			end;
};
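
/*
 * Illustrative sketch only: the v2 entry blacklists a whole range of journal
 * sequence numbers rather than a single one. Treating both ends as inclusive
 * here is an assumption for illustration; the authoritative interpretation
 * lives in journal_seq_blacklist.c.
 */
static inline _Bool bch2_example_seq_blacklisted(const struct jset_entry_blacklist_v2 *bl,
						 __u64 seq)
{
	return seq >= __le64_to_cpu(bl->start) &&
	       seq <= __le64_to_cpu(bl->end);
}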

#define BCH_FS_USAGE_TYPES()		\
	x(reserved,		0)	\
	x(inodes,		1)	\
	x(key_version,		2)

enum {
#define x(f, nr)	BCH_FS_USAGE_##f	= nr,
	BCH_FS_USAGE_TYPES()
#undef x
	BCH_FS_USAGE_NR
};

struct jset_entry_usage {
	struct jset_entry	entry;
	__le64			v;
} __packed;

struct jset_entry_data_usage {
	struct jset_entry	entry;
	__le64			v;
	struct bch_replicas_entry r;
} __packed;

struct jset_entry_clock {
	struct jset_entry	entry;
	__u8			rw;
	__u8			pad[7];
	__le64			time;
} __packed;

struct jset_entry_dev_usage_type {
	__le64			buckets;
	__le64			sectors;
	__le64			fragmented;
} __packed;

struct jset_entry_dev_usage {
	struct jset_entry	entry;
	__le32			dev;
	__u32			pad;

	__le64			buckets_ec;
	__le64			_buckets_unavailable; /* No longer used */

	struct jset_entry_dev_usage_type d[];
};

static inline unsigned jset_entry_dev_usage_nr_types(struct jset_entry_dev_usage *u)
{
	return (vstruct_bytes(&u->entry) - sizeof(struct jset_entry_dev_usage)) /
		sizeof(struct jset_entry_dev_usage_type);
}
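
/*
 * Illustrative sketch (hypothetical helper): the d[] array above is sized by
 * the entry's length rather than by a stored count, so consumers derive the
 * number of usage types with jset_entry_dev_usage_nr_types() and then walk
 * the array:
 */
static inline __u64 bch2_example_dev_usage_buckets(struct jset_entry_dev_usage *u)
{
	__u64 buckets = 0;
	unsigned i;

	for (i = 0; i < jset_entry_dev_usage_nr_types(u); i++)
		buckets += __le64_to_cpu(u->d[i].buckets);
	return buckets;
}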

struct jset_entry_log {
	struct jset_entry	entry;
	u8			d[];
} __packed;

/*
 * On disk format for a journal entry:
 * seq is monotonically increasing; every journal entry has its own unique
 * sequence number.
 *
 * last_seq is the oldest journal entry that still has keys the btree hasn't
 * flushed to disk yet.
 *
 * version is for on disk format changes.
 */
struct jset {
	struct bch_csum		csum;

	__le64			magic;
	__le64			seq;
	__le32			version;
	__le32			flags;

	__le32			u64s; /* size of d[] in u64s */

	__u8			encrypted_start[0];

	__le16			_read_clock; /* no longer used */
	__le16			_write_clock;

	/* Sequence number of oldest dirty journal entry */
	__le64			last_seq;

	struct jset_entry	start[0];
	__u64			_data[];
} __packed __aligned(8);

LE32_BITMASK(JSET_CSUM_TYPE,	struct jset, flags, 0, 4);
LE32_BITMASK(JSET_BIG_ENDIAN,	struct jset, flags, 4, 5);
LE32_BITMASK(JSET_NO_FLUSH,	struct jset, flags, 5, 6);
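
/*
 * Illustrative sketch (hypothetical helper): jset entries are variable length
 * and self-describing via their u64s field, so journal replay can walk them
 * with plain pointer arithmetic; JSET_KEYS_U64s accounts for the jset_entry
 * header itself. The real code uses the vstruct helpers from vstructs.h for
 * this.
 */
static inline struct jset_entry *bch2_example_jset_entry_next(struct jset_entry *e)
{
	return (struct jset_entry *)
		((__u64 *) e + JSET_KEYS_U64s + __le16_to_cpu(e->u64s));
}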

#define BCH_JOURNAL_BUCKETS_MIN		8

/* Btree: */

enum btree_id_flags {
	BTREE_ID_EXTENTS	= BIT(0),
	BTREE_ID_SNAPSHOTS	= BIT(1),
	BTREE_ID_DATA		= BIT(2),
};

#define BCH_BTREE_IDS()								\
	x(extents,		0,	BTREE_ID_EXTENTS|BTREE_ID_SNAPSHOTS|BTREE_ID_DATA,\
	  BIT_ULL(KEY_TYPE_whiteout)|						\
	  BIT_ULL(KEY_TYPE_error)|						\
	  BIT_ULL(KEY_TYPE_cookie)|						\
	  BIT_ULL(KEY_TYPE_extent)|						\
	  BIT_ULL(KEY_TYPE_reservation)|					\
	  BIT_ULL(KEY_TYPE_reflink_p)|						\
	  BIT_ULL(KEY_TYPE_inline_data))					\
	x(inodes,		1,	BTREE_ID_SNAPSHOTS,			\
	  BIT_ULL(KEY_TYPE_whiteout)|						\
	  BIT_ULL(KEY_TYPE_inode)|						\
	  BIT_ULL(KEY_TYPE_inode_v2)|						\
	  BIT_ULL(KEY_TYPE_inode_v3)|						\
	  BIT_ULL(KEY_TYPE_inode_generation))					\
	x(dirents,		2,	BTREE_ID_SNAPSHOTS,			\
	  BIT_ULL(KEY_TYPE_whiteout)|						\
	  BIT_ULL(KEY_TYPE_hash_whiteout)|					\
	  BIT_ULL(KEY_TYPE_dirent))						\
	x(xattrs,		3,	BTREE_ID_SNAPSHOTS,			\
	  BIT_ULL(KEY_TYPE_whiteout)|						\
	  BIT_ULL(KEY_TYPE_cookie)|						\
	  BIT_ULL(KEY_TYPE_hash_whiteout)|					\
	  BIT_ULL(KEY_TYPE_xattr))						\
	x(alloc,		4,	0,					\
	  BIT_ULL(KEY_TYPE_alloc)|						\
	  BIT_ULL(KEY_TYPE_alloc_v2)|						\
	  BIT_ULL(KEY_TYPE_alloc_v3)|						\
	  BIT_ULL(KEY_TYPE_alloc_v4))						\
	x(quotas,		5,	0,					\
	  BIT_ULL(KEY_TYPE_quota))						\
	x(stripes,		6,	0,					\
	  BIT_ULL(KEY_TYPE_stripe))						\
	x(reflink,		7,	BTREE_ID_EXTENTS|BTREE_ID_DATA,		\
	  BIT_ULL(KEY_TYPE_reflink_v)|						\
	  BIT_ULL(KEY_TYPE_indirect_inline_data))				\
	x(subvolumes,		8,	0,					\
	  BIT_ULL(KEY_TYPE_subvolume))						\
	x(snapshots,		9,	0,					\
	  BIT_ULL(KEY_TYPE_snapshot))						\
	x(lru,			10,	0,					\
	  BIT_ULL(KEY_TYPE_set))						\
	x(freespace,		11,	BTREE_ID_EXTENTS,			\
	  BIT_ULL(KEY_TYPE_set))						\
	x(need_discard,		12,	0,					\
	  BIT_ULL(KEY_TYPE_set))						\
	x(backpointers,		13,	0,					\
	  BIT_ULL(KEY_TYPE_backpointer))					\
	x(bucket_gens,		14,	0,					\
	  BIT_ULL(KEY_TYPE_bucket_gens))					\
	x(snapshot_trees,	15,	0,					\
	  BIT_ULL(KEY_TYPE_snapshot_tree))					\
	x(deleted_inodes,	16,	BTREE_ID_SNAPSHOTS,			\
	  BIT_ULL(KEY_TYPE_set))						\
	x(logged_ops,		17,	0,					\
	  BIT_ULL(KEY_TYPE_logged_op_truncate)|					\
	  BIT_ULL(KEY_TYPE_logged_op_finsert))

enum btree_id {
#define x(name, nr, ...) BTREE_ID_##name = nr,
	BCH_BTREE_IDS()
#undef x
	BTREE_ID_NR
};

#define BTREE_MAX_DEPTH		4U

/* Btree nodes */

/*
 * Btree nodes
 *
 * On disk a btree node is a list/log of these; within each set the keys are
 * sorted
 */
struct bset {
	__le64			seq;

	/*
	 * Highest journal entry this bset contains keys for.
	 * If on recovery we don't see that journal entry, this bset is ignored:
	 * this allows us to preserve the order of all index updates after a
	 * crash, since the journal records a total order of all index updates
	 * and anything that didn't make it to the journal doesn't get used.
	 */
	__le64			journal_seq;

	__le32			flags;
	__le16			version;
	__le16			u64s; /* count of d[] in u64s */

	struct bkey_packed	start[0];
	__u64			_data[];
} __packed __aligned(8);

LE32_BITMASK(BSET_CSUM_TYPE,	struct bset, flags, 0, 4);

LE32_BITMASK(BSET_BIG_ENDIAN,	struct bset, flags, 4, 5);
LE32_BITMASK(BSET_SEPARATE_WHITEOUTS,
				struct bset, flags, 5, 6);

/* Sector offset within the btree node: */
LE32_BITMASK(BSET_OFFSET,	struct bset, flags, 16, 32);
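
/*
 * Illustrative sketch (hypothetical helper): this is the check described in
 * the journal_seq comment above - a bset whose journal_seq is newer than the
 * newest journal entry actually seen during recovery is ignored, so index
 * updates never reappear ahead of the journal:
 */
static inline _Bool bch2_example_bset_usable(const struct bset *i,
					     __u64 newest_journal_seq)
{
	return __le64_to_cpu(i->journal_seq) <= newest_journal_seq;
}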

struct btree_node {
	struct bch_csum		csum;
	__le64			magic;

	/* this flags field is encrypted, unlike bset->flags: */
	__le64			flags;

	/* Closed interval: */
	struct bpos		min_key;
	struct bpos		max_key;
	struct bch_extent_ptr	_ptr; /* not used anymore */
	struct bkey_format	format;

	union {
		struct bset	keys;
		struct {
			__u8	pad[22];
			__le16	u64s;
			__u64	_data[0];
		};
	};
} __packed __aligned(8);

LE64_BITMASK(BTREE_NODE_ID_LO,	struct btree_node, flags,  0,  4);
LE64_BITMASK(BTREE_NODE_LEVEL,	struct btree_node, flags,  4,  8);
LE64_BITMASK(BTREE_NODE_NEW_EXTENT_OVERWRITE,
				struct btree_node, flags,  8,  9);
LE64_BITMASK(BTREE_NODE_ID_HI,	struct btree_node, flags,  9, 25);
/* 25-32 unused */
LE64_BITMASK(BTREE_NODE_SEQ,	struct btree_node, flags, 32, 64);

static inline __u64 BTREE_NODE_ID(struct btree_node *n)
{
	return BTREE_NODE_ID_LO(n) | (BTREE_NODE_ID_HI(n) << 4);
}

static inline void SET_BTREE_NODE_ID(struct btree_node *n, __u64 v)
{
	SET_BTREE_NODE_ID_LO(n, v);
	SET_BTREE_NODE_ID_HI(n, v >> 4);
}

struct btree_node_entry {
	struct bch_csum		csum;

	union {
		struct bset	keys;
		struct {
			__u8	pad[22];
			__le16	u64s;
			__u64	_data[0];
		};
	};
} __packed __aligned(8);

#endif /* _BCACHEFS_FORMAT_H */