#ifndef _LINUX_BCACHE_H
#define _LINUX_BCACHE_H

/*
 * Bcache on disk data structures
 */

#include <asm/types.h>
/*
 * BITMASK() - generate a getter/setter pair for a bitfield packed into
 * a 64-bit member of @type.
 *
 * Expands to:
 *   name(k)        - extract @size bits starting at bit @offset of k->field
 *   SET_name(k, v) - store the low @size bits of @v at bit @offset
 *
 * ~(~0ULL << size) is a mask of the @size low bits; note the shift is
 * undefined for size == 64, so all users must keep size < 64 (they do).
 */
#define BITMASK(name, type, field, offset, size)		\
static inline __u64 name(const type *k)				\
{ return (k->field >> offset) & ~(~0ULL << size); }		\
								\
static inline void SET_##name(type *k, __u64 v)			\
{								\
	k->field &= ~(~(~0ULL << size) << offset);		\
	k->field |= (v & ~(~0ULL << size)) << offset;		\
}
19 | ||
/* Btree keys - all units are in sectors */

struct bkey {
	__u64	high;	/* packed bitfields - accessed via the KEY_FIELD() helpers below */
	__u64	low;	/* key offset, in sectors (see KEY_OFFSET()/SET_KEY_OFFSET()) */
	__u64	ptr[];	/* variable number of encoded pointers; count = KEY_PTRS() */
};
27 | ||
/* KEY_FIELD() - declare accessors for a bitfield in a struct bkey word */
#define KEY_FIELD(name, field, offset, size)			\
	BITMASK(name, struct bkey, field, offset, size)

/*
 * PTR_FIELD() - like BITMASK(), but for a bitfield packed into one of
 * the key's ptr[] entries; the accessors take the pointer index @i.
 */
#define PTR_FIELD(name, offset, size)				\
static inline __u64 name(const struct bkey *k, unsigned i)	\
{ return (k->ptr[i] >> offset) & ~(~0ULL << size); }		\
								\
static inline void SET_##name(struct bkey *k, unsigned i, __u64 v)\
{								\
	k->ptr[i] &= ~(~(~0ULL << size) << offset);		\
	k->ptr[i] |= (v & ~(~0ULL << size)) << offset;		\
}
40 | ||
#define KEY_SIZE_BITS		16
#define KEY_MAX_U64S		8

/* Layout of bkey->high, from the top down (bit 63 is the header bit, see KEY()) */
KEY_FIELD(KEY_PTRS,	high, 60, 3)	/* number of entries in ptr[] */
KEY_FIELD(HEADER_SIZE,	high, 58, 2)
KEY_FIELD(KEY_CSUM,	high, 56, 2)
KEY_FIELD(KEY_PINNED,	high, 55, 1)
KEY_FIELD(KEY_DIRTY,	high, 36, 1)

KEY_FIELD(KEY_SIZE,	high, 20, KEY_SIZE_BITS)	/* extent size, in sectors */
KEY_FIELD(KEY_INODE,	high, 0,  20)
/* Next time I change the on disk format, KEY_OFFSET() won't be 64 bits */

/*
 * KEY_OFFSET() - the key's offset, in sectors.  Implemented by hand
 * rather than via KEY_FIELD() because it currently occupies the whole
 * of bkey->low.
 */
static inline __u64 KEY_OFFSET(const struct bkey *k)
{
	return k->low;
}

static inline void SET_KEY_OFFSET(struct bkey *k, __u64 v)
{
	k->low = v;
}
64 | ||
/*
 * The high bit being set is a relic from when we used it to do binary
 * searches - it told you where a key started. It's not used anymore,
 * and can probably be safely dropped.
 */
/* KEY() - build a bkey compound literal (no pointers) from inode/offset/size */
#define KEY(inode, offset, size)					\
((struct bkey) {							\
	.high = (1ULL << 63) | ((__u64) (size) << 20) | (inode),	\
	.low = (offset)							\
})
75 | ||
#define ZERO_KEY		KEY(0, 0, 0)

/*
 * Maximum legal inode/offset values (20-bit inode, 63-bit offset).
 *
 * Note: this previously read ~(~0 << 20); ~0 is a signed int, and
 * left-shifting a negative value is undefined behaviour in C.  Using an
 * unsigned constant yields the identical mask (0xfffff) without UB.
 */
#define MAX_KEY_INODE		(~(~0U << 20))
#define MAX_KEY_OFFSET		(~0ULL >> 1)
#define MAX_KEY			KEY(MAX_KEY_INODE, MAX_KEY_OFFSET, 0)

/* Start of the extent: keys store the end offset, size extends backwards */
#define KEY_START(k)		(KEY_OFFSET(k) - KEY_SIZE(k))
#define START_KEY(k)		KEY(KEY_INODE(k), KEY_START(k), 0)
84 | ||
#define PTR_DEV_BITS	12

/* Layout of each bkey->ptr[] entry, from the top down: dev | offset | gen */
PTR_FIELD(PTR_DEV,	51, PTR_DEV_BITS)
PTR_FIELD(PTR_OFFSET,	8,  43)	/* in sectors */
PTR_FIELD(PTR_GEN,	0,  8)
/* Sentinel device number (all ones) - not a real cache device */
#define PTR_CHECK_DEV		((1 << PTR_DEV_BITS) - 1)

/*
 * PTR() - encode one ptr[] entry from its gen/offset/dev fields.
 *
 * The arguments are parenthesized so that compound expressions passed
 * as gen/offset/dev cannot re-associate with the shifts and ors in the
 * expansion (standard macro hygiene; the previous form used the bare
 * argument tokens).
 */
#define PTR(gen, offset, dev)						\
	((((__u64) (dev)) << 51) | ((__u64) (offset)) << 8 | (gen))
95 | ||
/* Bkey utility code */

/* Size of @k in __u64s: the two-word header plus one word per pointer */
static inline unsigned long bkey_u64s(const struct bkey *k)
{
	return (sizeof(struct bkey) / sizeof(__u64)) + KEY_PTRS(k);
}

/* Size of @k in bytes, including its ptr[] entries */
static inline unsigned long bkey_bytes(const struct bkey *k)
{
	return bkey_u64s(k) * sizeof(__u64);
}
107 | ||
/* Copy an entire key, including its pointers */
#define bkey_copy(_dest, _src)	memcpy(_dest, _src, bkey_bytes(_src))

/* Copy only the inode and offset; dest's size/pointers/flags are untouched */
static inline void bkey_copy_key(struct bkey *dest, const struct bkey *src)
{
	SET_KEY_INODE(dest, KEY_INODE(src));
	SET_KEY_OFFSET(dest, KEY_OFFSET(src));
}
115 | ||
/*
 * Step to the key immediately following @k in a packed array of keys.
 * NOTE(review): the (void *) cast silently drops const - callers get a
 * mutable pointer into possibly-const storage.
 */
static inline struct bkey *bkey_next(const struct bkey *k)
{
	__u64 *d = (void *) k;
	return (struct bkey *) (d + bkey_u64s(k));
}
121 | ||
fafff81c | 122 | static inline struct bkey *bkey_idx(const struct bkey *k, unsigned nr_keys) |
81ab4190 KO |
123 | { |
124 | __u64 *d = (void *) k; | |
125 | return (struct bkey *) (d + nr_keys); | |
126 | } | |
/* Enough for a key with 6 pointers */
#define BKEY_PAD		8

/* Declare a bkey with inline storage for up to BKEY_PAD __u64s */
#define BKEY_PADDED(key)					\
	union { struct bkey key; __u64 key ## _pad[BKEY_PAD]; }
132 | ||
/* Superblock */

/* Version 0: Cache device
 * Version 1: Backing device
 * Version 2: Seed pointer into btree node checksum
 * Version 3: Cache device with new UUID format
 * Version 4: Backing device with data offset
 */
#define BCACHE_SB_VERSION_CDEV			0
#define BCACHE_SB_VERSION_BDEV			1
#define BCACHE_SB_VERSION_CDEV_WITH_UUID	3
#define BCACHE_SB_VERSION_BDEV_WITH_OFFSET	4
#define BCACHE_SB_MAX_VERSION			4

#define SB_SECTOR		8	/* sector where the superblock lives */
#define SB_SIZE			4096	/* bytes */
#define SB_LABEL_SIZE		32
#define SB_JOURNAL_BUCKETS	256U
/* SB_JOURNAL_BUCKETS must be divisible by BITS_PER_LONG */
#define MAX_CACHES_PER_SET	8

#define BDEV_DATA_START_DEFAULT	16	/* sectors */
155 | ||
/*
 * On-disk superblock, shared by cache and backing devices; which arm of
 * the anonymous unions applies depends on the version (see SB_IS_BDEV()).
 */
struct cache_sb {
	__u64		csum;
	__u64		offset;		/* sector where this sb was written */
	__u64		version;

	__u8		magic[16];

	__u8		uuid[16];	/* identifies this particular device */
	union {
		__u8	set_uuid[16];	/* cache set this device belongs to */
		__u64	set_magic;	/* first 8 bytes, used for magic xor */
	};
	__u8		label[SB_LABEL_SIZE];

	__u64		flags;		/* accessed via BITMASK() helpers below */
	__u64		seq;
	__u64		pad[8];

	union {
	struct {
		/* Cache devices */
		__u64	nbuckets;	/* device size */

		__u16	block_size;	/* sectors */
		__u16	bucket_size;	/* sectors */

		__u16	nr_in_set;
		__u16	nr_this_dev;
	};
	struct {
		/* Backing devices */
		__u64	data_offset;

		/*
		 * block_size from the cache device section is still used by
		 * backing devices, so don't add anything here until we fix
		 * things to not need it for backing devices anymore
		 */
	};
	};

	__u32		last_mount;	/* time_t */

	__u16		first_bucket;
	union {
		__u16	njournal_buckets;
		__u16	keys;
	};
	__u64		d[SB_JOURNAL_BUCKETS];	/* journal buckets */
};
206 | ||
207 | static inline _Bool SB_IS_BDEV(const struct cache_sb *sb) | |
208 | { | |
209 | return sb->version == BCACHE_SB_VERSION_BDEV | |
210 | || sb->version == BCACHE_SB_VERSION_BDEV_WITH_OFFSET; | |
211 | } | |
212 | ||
/* cache_sb.flags bits for cache devices */
BITMASK(CACHE_SYNC,		struct cache_sb, flags, 0, 1);
BITMASK(CACHE_DISCARD,		struct cache_sb, flags, 1, 1);
BITMASK(CACHE_REPLACEMENT,	struct cache_sb, flags, 2, 3);
#define CACHE_REPLACEMENT_LRU		0U
#define CACHE_REPLACEMENT_FIFO		1U
#define CACHE_REPLACEMENT_RANDOM	2U

/*
 * cache_sb.flags bits for backing devices.  These reuse the low bits
 * occupied by the cache-device flags above - presumably safe because a
 * device is only ever one or the other; verify before adding new bits.
 */
BITMASK(BDEV_CACHE_MODE,	struct cache_sb, flags, 0, 4);
#define CACHE_MODE_WRITETHROUGH		0U
#define CACHE_MODE_WRITEBACK		1U
#define CACHE_MODE_WRITEAROUND		2U
#define CACHE_MODE_NONE			3U
BITMASK(BDEV_STATE,		struct cache_sb, flags, 61, 2);
#define BDEV_STATE_NONE			0U
#define BDEV_STATE_CLEAN		1U
#define BDEV_STATE_DIRTY		2U
#define BDEV_STATE_STALE		3U
230 | ||
/*
 * Magic numbers
 *
 * The various other data structures have their own magic numbers, which are
 * xored with the first part of the cache set's UUID
 */

#define JSET_MAGIC		0x245235c1a3625032ULL
#define PSET_MAGIC		0x6750e15f87337f91ULL
#define BSET_MAGIC		0x90135c78b99e07f5ULL
241 | ||
242 | static inline __u64 jset_magic(struct cache_sb *sb) | |
243 | { | |
244 | return sb->set_magic ^ JSET_MAGIC; | |
245 | } | |
246 | ||
247 | static inline __u64 pset_magic(struct cache_sb *sb) | |
248 | { | |
249 | return sb->set_magic ^ PSET_MAGIC; | |
250 | } | |
251 | ||
252 | static inline __u64 bset_magic(struct cache_sb *sb) | |
253 | { | |
254 | return sb->set_magic ^ BSET_MAGIC; | |
255 | } | |
256 | ||
/*
 * Journal
 *
 * On disk format for a journal entry:
 * seq is monotonically increasing; every journal entry has its own unique
 * sequence number.
 *
 * last_seq is the oldest journal entry that still has keys the btree hasn't
 * flushed to disk yet.
 *
 * version is for on disk format changes.
 */

#define BCACHE_JSET_VERSION_UUIDv1	1
#define BCACHE_JSET_VERSION_UUID	1	/* Always latest UUID format */
#define BCACHE_JSET_VERSION		1

struct jset {
	__u64		csum;
	__u64		magic;		/* presumably jset_magic() of the set - verify */
	__u64		seq;
	__u32		version;
	__u32		keys;		/* presumably the length of d[] in __u64s - verify against users */

	__u64		last_seq;

	BKEY_PADDED(uuid_bucket);
	BKEY_PADDED(btree_root);
	__u16		btree_level;
	__u16		pad[3];

	__u64		prio_bucket[MAX_CACHES_PER_SET];

	/* keys follow the fixed header; [0] arrays because flexible array
	 * members aren't permitted inside unions */
	union {
		struct bkey	start[0];
		__u64		d[0];
	};
};
295 | ||
/* Bucket prios/gens */

struct prio_set {
	__u64		csum;
	__u64		magic;
	__u64		seq;
	__u32		version;
	__u32		pad;

	__u64		next_bucket;

	/* one entry per bucket; packed so the on-disk stride is exactly
	 * 3 bytes per bucket */
	struct bucket_disk {
		__u16	prio;
		__u8	gen;
	} __attribute((packed)) data[];
};
312 | ||
/* UUIDS - per backing device/flash only volume metadata */

struct uuid_entry {
	union {
		struct {
			__u8	uuid[16];
			__u8	label[32];
			__u32	first_reg;
			__u32	last_reg;
			__u32	invalidated;

			__u32	flags;	/* see UUID_FLASH_ONLY below */
			/* Size of flash only volumes */
			__u64	sectors;
		};

		__u8	pad[128];	/* pads each entry to 128 bytes on disk */
	};
};

BITMASK(UUID_FLASH_ONLY, struct uuid_entry, flags, 0, 1);
334 | ||
/* Btree nodes */

/* Version 1: Seed pointer into btree node checksum
 */
#define BCACHE_BSET_CSUM	1
#define BCACHE_BSET_VERSION	1

/*
 * Btree nodes
 *
 * On disk a btree node is a list/log of these; within each set the keys are
 * sorted
 */
struct bset {
	__u64		csum;
	__u64		magic;
	__u64		seq;
	__u32		version;
	__u32		keys;

	/* keys follow the header; [0] arrays because flexible array
	 * members aren't permitted inside unions */
	union {
		struct bkey	start[0];
		__u64		d[0];
	};
};
360 | ||
/* OBSOLETE */

/* UUIDS - per backing device/flash only volume metadata */

/* Old uuid entry layout - marked OBSOLETE above; presumably retained
 * only for reading metadata written by older versions */
struct uuid_entry_v0 {
	__u8		uuid[16];
	__u8		label[32];
	__u32		first_reg;
	__u32		last_reg;
	__u32		invalidated;
	__u32		pad;
};

#endif /* _LINUX_BCACHE_H */