/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _BCACHEFS_H
#define _BCACHEFS_H

/*
 * SOME HIGH LEVEL CODE DOCUMENTATION:
 *
 * Bcache mostly works with cache sets, cache devices, and backing devices.
 *
 * Support for multiple cache devices hasn't quite been finished off yet, but
 * it's about 95% plumbed through. A cache set and its cache devices are sort
 * of like an md raid array and its component devices. Most of the code doesn't
 * care about individual cache devices; the main abstraction is the cache set.
 *
 * Multiple cache devices are intended to give us the ability to mirror dirty
 * cached data and metadata, without mirroring clean cached data.
 *
 * Backing devices are different, in that they have a lifetime independent of a
 * cache set. When you register a newly formatted backing device it'll come up
 * in passthrough mode, and then you can attach and detach a backing device from
 * a cache set at runtime - while it's mounted and in use. Detaching implicitly
 * invalidates any cached data for that backing device.
 *
 * A cache set can have multiple (many) backing devices attached to it.
 *
 * There's also flash only volumes - this is the reason for the distinction
 * between struct cached_dev and struct bcache_device. A flash only volume
 * works much like a bcache device that has a backing device, except the
 * "cached" data is always dirty. The end result is that we get thin
 * provisioning with very little additional code.
 *
 * Flash only volumes work but they're not production ready because the moving
 * garbage collector needs more work. More on that later.
 *
 * BUCKETS/ALLOCATION:
 *
 * Bcache is primarily designed for caching, which means that in normal
 * operation all of our available space will be allocated. Thus, we need an
 * efficient way of deleting things from the cache so we can write new things to
 * it.
 *
 * To do this, we first divide the cache device up into buckets. A bucket is the
 * unit of allocation; they're typically around 1 MB - anywhere from 128k to 2M+
 * works efficiently.
 *
 * Each bucket has a 16 bit priority, and an 8 bit generation associated with
 * it. The gens and priorities for all the buckets are stored contiguously and
 * packed on disk (in a linked list of buckets - aside from the superblock, all
 * of bcache's metadata is stored in buckets).
 *
 * The priority is used to implement an LRU. We reset a bucket's priority when
 * we allocate it or on a cache hit, and every so often we decrement the
 * priority of each bucket. It could be used to implement something more
 * sophisticated, if anyone ever gets around to it.
 *
 * The generation is used for invalidating buckets. Each pointer also has an 8
 * bit generation embedded in it; for a pointer to be considered valid, its gen
 * must match the gen of the bucket it points into. Thus, to reuse a bucket all
 * we have to do is increment its gen (and write its new gen to disk; we batch
 * this up).
 *
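 * As a rough sketch (hypothetical names - the real types live in
 * buckets_types.h and bcachefs_format.h), reuse and validity look like:
 *
 *	struct bucket_meta { u16 prio; u8 gen; };	// per-bucket metadata
 *
 *	bool ptr_valid(struct bucket_meta *b, u8 ptr_gen)
 *	{
 *		return ptr_gen == b->gen;	// stale pointers simply don't match
 *	}
 *
 *	void invalidate_bucket(struct bucket_meta *b)
 *	{
 *		b->gen++;	// every existing pointer into b is now stale
 *	}
 *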
 * Bcache is entirely COW - we never write twice to a bucket, even buckets that
 * contain metadata (including btree nodes).
 *
 * THE BTREE:
 *
 * Bcache is in large part designed around the btree.
 *
 * At a high level, the btree is just an index of key -> ptr tuples.
 *
 * Keys represent extents, and thus have a size field. Keys also have a variable
 * number of pointers attached to them (potentially zero, which is handy for
 * invalidating the cache).
 *
 * The key itself is an inode:offset pair. The inode number corresponds to a
 * backing device or a flash only volume. The offset is the ending offset of the
 * extent within the inode - not the starting offset; this makes lookups
 * slightly more convenient.
 *
 * Pointers contain the cache device id, the offset on that device, and an 8 bit
 * generation number. More on the gen later.
 *
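 * Illustratively (again with made-up names - see bcachefs_format.h for the
 * real packed encodings):
 *
 *	struct example_key { u64 inode; u64 offset; u32 size; };
 *	struct example_ptr { u64 dev:8, bucket_offset:48, gen:8; };
 *
 *	// Because offset is the *end* of the extent, a lookup for byte o in
 *	// inode i searches for the first key with inode == i && offset > o;
 *	// if that key's start (offset - size) is <= o, it's a hit.
 *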
 * Index lookups are not fully abstracted - cache lookups in particular are
 * still somewhat mixed in with the btree code, but things are headed in that
 * direction.
 *
 * Updates are fairly well abstracted, though. There are two different ways of
 * updating the btree; insert and replace.
 *
 * BTREE_INSERT will just take a list of keys and insert them into the btree -
 * overwriting (possibly only partially) any extents they overlap with. This is
 * used to update the index after a write.
 *
 * BTREE_REPLACE is really cmpxchg(); it inserts a key into the btree iff it is
 * overwriting a key that matches another given key. This is used for inserting
 * data into the cache after a cache miss, and for background writeback, and for
 * the moving garbage collector.
 *
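 * In pseudo-C, the BTREE_REPLACE semantics are roughly (illustrative only):
 *
 *	// Succeeds only if the index still holds @old at @new's position -
 *	// i.e. nobody raced with us and rewrote that extent in the meantime:
 *	if (btree_lookup(new->inode, new->offset) == old)
 *		btree_insert(new);
 *	else
 *		return -EAGAIN;	// lost the race; caller decides whether to retry
 *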
 * There is no "delete" operation; deleting things from the index is
 * accomplished either by invalidating pointers (by incrementing a bucket's
 * gen) or by inserting a key with 0 pointers - which will overwrite anything
 * previously present at that location in the index.
 *
 * This means that there are always stale/invalid keys in the btree. They're
 * filtered out by the code that iterates through a btree node, and removed when
 * a btree node is rewritten.
 *
 * BTREE NODES:
 *
 * Our unit of allocation is a bucket, and we can't arbitrarily allocate and
 * free smaller than a bucket - so, that's how big our btree nodes are.
 *
 * (If buckets are really big we'll only use part of the bucket for a btree node
 * - no less than 1/4th - but a bucket still contains no more than a single
 * btree node. I'd actually like to change this, but for now we rely on the
 * bucket's gen for deleting btree nodes when we rewrite/split a node.)
 *
 * Anyways, btree nodes are big - big enough to be inefficient with a textbook
 * btree implementation.
 *
 * The way this is solved is that btree nodes are internally log structured; we
 * can append new keys to an existing btree node without rewriting it. This
 * means each set of keys we write is sorted, but the node is not.
 *
 * We maintain this log structure in memory - keeping 1 MB of keys sorted would
 * be expensive, and we have to distinguish between the keys we have written and
 * the keys we haven't. So to do a lookup in a btree node, we have to search
 * each sorted set. But we do merge written sets together lazily, so the cost of
 * these extra searches is quite low (normally most of the keys in a btree node
 * will be in one big set, and then there'll be one or two sets that are much
 * smaller).
 *
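 * A lookup therefore looks something like this (hypothetical helpers, for
 * illustration - the real iterator code lives elsewhere):
 *
 *	for (i = 0; i < node->nr_sets; i++) {
 *		k = bsearch_sorted_set(&node->sets[i], search_key);
 *		best = min_key(best, k);  // smallest candidate >= search_key
 *	}
 *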
 * This log structure makes bcache's btree more of a hybrid between a
 * conventional btree and a compacting data structure, with some of the
 * advantages of both.
 *
 * GARBAGE COLLECTION:
 *
 * We can't just invalidate any bucket - it might contain dirty data or
 * metadata. If it once contained dirty data, other writes might overwrite it
 * later, leaving no valid pointers into that bucket in the index.
 *
 * Thus, the primary purpose of garbage collection is to find buckets to reuse.
 * It also counts how much valid data each bucket currently contains, so that
 * allocation can reuse buckets sooner when they've been mostly overwritten.
 *
 * It also does some things that are really internal to the btree
 * implementation. If a btree node contains pointers that are stale by more than
 * some threshold, it rewrites the btree node to avoid the bucket's generation
 * wrapping around. It also merges adjacent btree nodes if they're empty enough.
 *
 * THE JOURNAL:
 *
 * Bcache's journal is not necessary for consistency; we always strictly
 * order metadata writes so that the btree and everything else is consistent on
 * disk in the event of an unclean shutdown, and in fact bcache had writeback
 * caching (with recovery from unclean shutdown) before journalling was
 * implemented.
 *
 * Rather, the journal is purely a performance optimization; we can't complete a
 * write until we've updated the index on disk, otherwise the cache would be
 * inconsistent in the event of an unclean shutdown. This means that without the
 * journal, on random write workloads we constantly have to update all the leaf
 * nodes in the btree, and those writes will be mostly empty (appending at most
 * a few keys each) - highly inefficient in terms of the amount of metadata
 * written, and it puts more strain on the various btree resorting/compacting
 * code.
 *
 * The journal is just a log of keys we've inserted; on startup we just reinsert
 * all the keys in the open journal entries. That means that when we're updating
 * a node in the btree, we can wait until a 4k block of keys fills up before
 * writing them out.
 *
 * For simplicity, we only journal updates to leaf nodes; updates to parent
 * nodes are rare enough (since our leaf nodes are huge) that it wasn't worth
 * the complexity to deal with journalling them (in particular, journal replay)
 * - updates to non leaf nodes just happen synchronously (see btree_split()).
 */

#undef pr_fmt
#define pr_fmt(fmt) "bcachefs: %s() " fmt "\n", __func__

#include <linux/backing-dev-defs.h>
#include <linux/bug.h>
#include <linux/bio.h>
#include <linux/closure.h>
#include <linux/kobject.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/percpu-refcount.h>
#include <linux/percpu-rwsem.h>
#include <linux/rhashtable.h>
#include <linux/rwsem.h>
#include <linux/seqlock.h>
#include <linux/shrinker.h>
#include <linux/types.h>
#include <linux/workqueue.h>
#include <linux/zstd.h>

#include "bcachefs_format.h"
#include "fifo.h"
#include "opts.h"
#include "util.h"

#define dynamic_fault(...)	0
#define race_fault(...)		0

#define bch2_fs_init_fault(name)				\
	dynamic_fault("bcachefs:bch_fs_init:" name)
#define bch2_meta_read_fault(name)				\
	dynamic_fault("bcachefs:meta:read:" name)
#define bch2_meta_write_fault(name)				\
	dynamic_fault("bcachefs:meta:write:" name)

#ifdef __KERNEL__
#define bch2_fmt(_c, fmt)	"bcachefs (%s): " fmt "\n", ((_c)->name)
#else
#define bch2_fmt(_c, fmt)	fmt "\n"
#endif

#define bch_info(c, fmt, ...) \
	printk(KERN_INFO bch2_fmt(c, fmt), ##__VA_ARGS__)
#define bch_notice(c, fmt, ...) \
	printk(KERN_NOTICE bch2_fmt(c, fmt), ##__VA_ARGS__)
#define bch_warn(c, fmt, ...) \
	printk(KERN_WARNING bch2_fmt(c, fmt), ##__VA_ARGS__)
#define bch_err(c, fmt, ...) \
	printk(KERN_ERR bch2_fmt(c, fmt), ##__VA_ARGS__)

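/*
 * Usage sketch - a hypothetical call site, showing what bch2_fmt() adds to the
 * message:
 *
 *	bch_err(c, "error reading btree node at %llu", sector);
 *	// -> printk(KERN_ERR "bcachefs (%s): error reading btree node at %llu\n",
 *	//	      c->name, sector);
 */
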
#define bch_verbose(c, fmt, ...)				\
do {								\
	if ((c)->opts.verbose_recovery)				\
		bch_info(c, fmt, ##__VA_ARGS__);		\
} while (0)

#define pr_verbose_init(opts, fmt, ...)				\
do {								\
	if (opt_get(opts, verbose_init))			\
		pr_info(fmt, ##__VA_ARGS__);			\
} while (0)

/* Parameters that are useful for debugging, but should always be compiled in: */
#define BCH_DEBUG_PARAMS_ALWAYS()				\
	BCH_DEBUG_PARAM(key_merging_disabled,			\
		"Disables merging of extents")			\
	BCH_DEBUG_PARAM(btree_gc_always_rewrite,		\
		"Causes mark and sweep to compact and rewrite every "	\
		"btree node it traverses")			\
	BCH_DEBUG_PARAM(btree_gc_rewrite_disabled,		\
		"Disables rewriting of btree nodes during mark and sweep")\
	BCH_DEBUG_PARAM(btree_shrinker_disabled,		\
		"Disables the shrinker callback for the btree node cache")

/* Parameters that should only be compiled in in debug mode: */
#define BCH_DEBUG_PARAMS_DEBUG()				\
	BCH_DEBUG_PARAM(expensive_debug_checks,			\
		"Enables various runtime debugging checks that "	\
		"significantly affect performance")		\
	BCH_DEBUG_PARAM(debug_check_bkeys,			\
		"Run bkey_debugcheck (primarily checking GC/allocation "\
		"information) when iterating over keys")	\
	BCH_DEBUG_PARAM(verify_btree_ondisk,			\
		"Reread btree nodes at various points to verify the "	\
		"mergesort in the read path against modifications "	\
		"done in memory")				\
	BCH_DEBUG_PARAM(journal_seq_verify,			\
		"Store the journal sequence number in the version "	\
		"number of every btree key, and verify that btree "	\
		"update ordering is preserved during recovery")	\
	BCH_DEBUG_PARAM(inject_invalid_keys,			\
		"Inserts keys with invalid version numbers, to test "	\
		"handling of invalid keys")			\
	BCH_DEBUG_PARAM(test_alloc_startup,			\
		"Force allocator startup to use the slowpath where it "	\
		"can't find enough free buckets without invalidating "	\
		"cached data")

#define BCH_DEBUG_PARAMS_ALL() BCH_DEBUG_PARAMS_ALWAYS() BCH_DEBUG_PARAMS_DEBUG()

#ifdef CONFIG_BCACHEFS_DEBUG
#define BCH_DEBUG_PARAMS() BCH_DEBUG_PARAMS_ALL()
#else
#define BCH_DEBUG_PARAMS() BCH_DEBUG_PARAMS_ALWAYS()
#endif
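
/*
 * BCH_DEBUG_PARAMS() is an x-macro: each user defines BCH_DEBUG_PARAM() to
 * taste before expanding it. E.g. struct bch_fs below turns each entry into a
 * bool field:
 *
 *	#define BCH_DEBUG_PARAM(name, description) bool name;
 *	BCH_DEBUG_PARAMS_ALL()
 *	#undef BCH_DEBUG_PARAM
 *
 * (sysfs code can expand the same list again to generate the knobs and their
 * descriptions.)
 */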

#define BCH_TIME_STATS()			\
	x(btree_node_mem_alloc)			\
	x(btree_gc)				\
	x(btree_split)				\
	x(btree_sort)				\
	x(btree_read)				\
	x(btree_lock_contended_read)		\
	x(btree_lock_contended_intent)		\
	x(btree_lock_contended_write)		\
	x(data_write)				\
	x(data_read)				\
	x(data_promote)				\
	x(journal_write)			\
	x(journal_delay)			\
	x(journal_blocked)			\
	x(journal_flush_seq)

enum bch_time_stats {
#define x(name) BCH_TIME_##name,
	BCH_TIME_STATS()
#undef x
	BCH_TIME_STAT_NR
};
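
/*
 * The same x-macro trick as BCH_DEBUG_PARAMS(): here it generates the enum
 * above, giving each stat an index into c->times[]. A timed section would look
 * roughly like this (sketch - assumes the bch2_time_stats_update() helper from
 * util.h):
 *
 *	u64 start = local_clock();
 *	ret = btree_split(...);
 *	bch2_time_stats_update(&c->times[BCH_TIME_btree_split], start);
 */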

#include "alloc_types.h"
#include "btree_types.h"
#include "buckets_types.h"
#include "clock_types.h"
#include "journal_types.h"
#include "keylist_types.h"
#include "quota_types.h"
#include "rebalance_types.h"
#include "replicas_types.h"
#include "super_types.h"

/* Number of nodes btree coalesce will try to coalesce at once */
#define GC_MERGE_NODES		4U

/* Maximum number of nodes we might need to allocate atomically: */
#define BTREE_RESERVE_MAX	(BTREE_MAX_DEPTH + (BTREE_MAX_DEPTH - 1))

/* Size of the freelist we allocate btree nodes from: */
#define BTREE_NODE_RESERVE	(BTREE_RESERVE_MAX * 4)

struct btree;

enum gc_phase {
	GC_PHASE_START,
	GC_PHASE_SB,

#define DEF_BTREE_ID(kwd, val, name) GC_PHASE_BTREE_##kwd,
	DEFINE_BCH_BTREE_IDS()
#undef DEF_BTREE_ID

	GC_PHASE_PENDING_DELETE,
	GC_PHASE_ALLOC,
	GC_PHASE_DONE
};

struct gc_pos {
	enum gc_phase		phase;
	struct bpos		pos;
	unsigned		level;
};

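/*
 * A sketch of how GC progress comparison can work with this (hypothetical
 * helper - the real one lives with the GC code): positions order first by
 * phase, then by btree position, then level, so "has GC marked X yet?" is a
 * single comparison.
 *
 *	static inline int gc_pos_cmp(struct gc_pos l, struct gc_pos r)
 *	{
 *		if (l.phase != r.phase)
 *			return l.phase < r.phase ? -1 : 1;
 *		if (bkey_cmp(l.pos, r.pos))
 *			return bkey_cmp(l.pos, r.pos);
 *		return l.level != r.level ? (l.level < r.level ? -1 : 1) : 0;
 *	}
 */
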
struct io_count {
	u64			sectors[2][BCH_DATA_NR];
};

struct bch_dev {
	struct kobject		kobj;
	struct percpu_ref	ref;
	struct completion	ref_completion;
	struct percpu_ref	io_ref;
	struct completion	io_ref_completion;

	struct bch_fs		*fs;

	u8			dev_idx;
	/*
	 * Cached version of this device's member info from superblock
	 * Committed by bch2_write_super() -> bch_fs_mi_update()
	 */
	struct bch_member_cpu	mi;
	__uuid_t		uuid;
	char			name[BDEVNAME_SIZE];

	struct bch_sb_handle	disk_sb;
	int			sb_write_error;

	struct bch_devs_mask	self;

	/* biosets used in cloned bios for writing multiple replicas */
	struct bio_set		replica_set;

	/*
	 * Buckets:
	 * Per-bucket arrays are protected by c->usage_lock, bucket_lock and
	 * gc_lock, for device resize - holding any of them is sufficient for
	 * access; or rcu_read_lock(), but only for ptr_stale():
	 */
	struct bucket_array __rcu *buckets;
	unsigned long		*buckets_dirty;
	/* most out of date gen in the btree */
	u8			*oldest_gens;
	struct rw_semaphore	bucket_lock;

	struct bch_dev_usage __percpu *usage_percpu;
	struct bch_dev_usage	usage_cached;

	/* Allocator: */
	struct task_struct __rcu *alloc_thread;

	/*
	 * free: Buckets that are ready to be used
	 *
	 * free_inc: Incoming buckets - these are buckets that currently have
	 * cached data in them, and we can't reuse them until after we write
	 * their new gen to disk. After prio_write() finishes writing the new
	 * gens/prios, they'll be moved to the free list (and possibly discarded
	 * in the process). See the sketch after this struct.
	 */
	alloc_fifo		free[RESERVE_NR];
	alloc_fifo		free_inc;
	spinlock_t		freelist_lock;

	u8			open_buckets_partial[OPEN_BUCKETS_COUNT];
	unsigned		open_buckets_partial_nr;

	size_t			fifo_last_bucket;

	/* last calculated minimum prio */
	u16			max_last_bucket_io[2];

	size_t			inc_gen_needs_gc;
	size_t			inc_gen_really_needs_gc;
	bool			allocator_blocked;

	alloc_heap		alloc_heap;

	/* Copying GC: */
	struct task_struct	*copygc_thread;
	copygc_heap		copygc_heap;
	struct bch_pd_controller copygc_pd;
	struct write_point	copygc_write_point;
	u64			copygc_threshold;

	atomic64_t		rebalance_work;

	struct journal_device	journal;

	struct work_struct	io_error_work;

	/* The rest of this all shows up in sysfs */
	atomic64_t		cur_latency[2];
	struct bch2_time_stats	io_latency[2];

#define CONGESTED_MAX		1024
	atomic_t		congested;
	u64			congested_last;

	struct io_count __percpu *io_done;
};
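
/*
 * Lifecycle sketch for free/free_inc above (illustrative pseudo-code, with
 * hypothetical helper names - the real allocator thread lives in alloc.c):
 *
 *	bucket = heap_pop(&ca->alloc_heap);	// pick a bucket to invalidate
 *	bucket_meta(ca, bucket)->gen++;		// in memory: all ptrs now stale
 *	fifo_push(&ca->free_inc, bucket);	// not yet safe to reuse...
 *	write_gens_prios(ca);			// ...until new gen is on disk
 *	fifo_push(&ca->free[RESERVE_NONE], fifo_pop(&ca->free_inc));
 */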

/*
 * Flag bits for what phase of startup/shutdown the cache set is at, how we're
 * shutting down, etc.:
 *
 * BCH_FS_UNREGISTERING means we're not just shutting down, we're detaching
 * all the backing devices first (their cached data gets invalidated, and they
 * won't automatically reattach).
 */
enum {
	/* startup: */
	BCH_FS_ALLOC_READ_DONE,
	BCH_FS_ALLOCATOR_STARTED,
	BCH_FS_INITIAL_GC_DONE,
	BCH_FS_FSCK_DONE,
	BCH_FS_STARTED,

	/* shutdown: */
	BCH_FS_EMERGENCY_RO,
	BCH_FS_WRITE_DISABLE_COMPLETE,

	/* errors: */
	BCH_FS_ERROR,
	BCH_FS_GC_FAILURE,

	/* misc: */
	BCH_FS_BDEV_MOUNTED,
	BCH_FS_FSCK_FIXED_ERRORS,
	BCH_FS_FSCK_UNFIXED_ERRORS,
	BCH_FS_FIXED_GENS,
	BCH_FS_REBUILD_REPLICAS,
	BCH_FS_HOLD_BTREE_WRITES,
};

struct btree_debug {
	unsigned		id;
	struct dentry		*btree;
	struct dentry		*btree_format;
	struct dentry		*failed;
};

enum bch_fs_state {
	BCH_FS_STARTING		= 0,
	BCH_FS_STOPPING,
	BCH_FS_RO,
	BCH_FS_RW,
};

struct bch_fs {
	struct closure		cl;

	struct list_head	list;
	struct kobject		kobj;
	struct kobject		internal;
	struct kobject		opts_dir;
	struct kobject		time_stats;
	unsigned long		flags;

	int			minor;
	struct device		*chardev;
	struct super_block	*vfs_sb;
	char			name[40];

	/* ro/rw, add/remove devices: */
	struct mutex		state_lock;
	enum bch_fs_state	state;

	/* Counts outstanding writes, for clean transition to read-only */
	struct percpu_ref	writes;
	struct work_struct	read_only_work;

	struct bch_dev __rcu	*devs[BCH_SB_MEMBERS_MAX];

	struct bch_replicas_cpu __rcu *replicas;
	struct bch_replicas_cpu __rcu *replicas_gc;
	struct mutex		replicas_gc_lock;

	struct bch_disk_groups_cpu __rcu *disk_groups;

	struct bch_opts		opts;

	/* Updated by bch2_sb_update(): */
	struct {
		__uuid_t	uuid;
		__uuid_t	user_uuid;

		u16		encoded_extent_max;

		u8		nr_devices;
		u8		clean;

		u8		encryption_type;

		u64		time_base_lo;
		u32		time_base_hi;
		u32		time_precision;
		u64		features;
	} sb;

	struct bch_sb_handle	disk_sb;

	unsigned short		block_bits;	/* ilog2(block_size) */

	u16			btree_foreground_merge_threshold;

	struct closure		sb_write;
	struct mutex		sb_lock;

	/* BTREE CACHE */
	struct bio_set		btree_bio;

	struct btree_root	btree_roots[BTREE_ID_NR];
	bool			btree_roots_dirty;
	struct mutex		btree_root_lock;

	struct btree_cache	btree_cache;

	mempool_t		btree_reserve_pool;

	/*
	 * Cache of allocated btree nodes - if we allocate a btree node and
	 * don't use it, if we free it that space can't be reused until going
	 * _all_ the way through the allocator (which exposes us to a livelock
	 * when allocating btree reserves fails halfway through) - instead, we
	 * can stick them here:
	 */
	struct btree_alloc	btree_reserve_cache[BTREE_NODE_RESERVE * 2];
	unsigned		btree_reserve_cache_nr;
	struct mutex		btree_reserve_cache_lock;

	mempool_t		btree_interior_update_pool;
	struct list_head	btree_interior_update_list;
	struct mutex		btree_interior_update_lock;
	struct closure_waitlist	btree_interior_update_wait;

	mempool_t		btree_iters_pool;

	struct workqueue_struct	*wq;
	/* copygc needs its own workqueue for index updates... */
	struct workqueue_struct	*copygc_wq;

	/* ALLOCATION */
	struct delayed_work	pd_controllers_update;
	unsigned		pd_controllers_update_seconds;

	struct bch_devs_mask	rw_devs[BCH_DATA_NR];

	u64			capacity; /* sectors */

	/*
	 * When capacity _decreases_ (due to a disk being removed), we
	 * increment capacity_gen - this invalidates outstanding reservations
	 * and forces them to be revalidated
	 */
	u32			capacity_gen;

	atomic64_t		sectors_available;

	struct bch_fs_usage __percpu *usage_percpu;
	struct bch_fs_usage	usage_cached;
	struct percpu_rw_semaphore usage_lock;

	struct closure_waitlist	freelist_wait;

	/*
	 * When we invalidate buckets, we use both the priority and the amount
	 * of good data to determine which buckets to reuse first - to weight
	 * those together consistently we keep track of the smallest nonzero
	 * priority of any bucket.
	 */
	struct bucket_clock	bucket_clock[2];

	struct io_clock		io_clock[2];

	/* ALLOCATOR */
	spinlock_t		freelist_lock;
	u8			open_buckets_freelist;
	u8			open_buckets_nr_free;
	struct closure_waitlist	open_buckets_wait;
	struct open_bucket	open_buckets[OPEN_BUCKETS_COUNT];

	struct write_point	btree_write_point;
	struct write_point	rebalance_write_point;

	struct write_point	write_points[WRITE_POINT_COUNT];
	struct hlist_head	write_points_hash[WRITE_POINT_COUNT];
	struct mutex		write_points_hash_lock;

	/* GARBAGE COLLECTION */
	struct task_struct	*gc_thread;
	atomic_t		kick_gc;
	unsigned long		gc_count;

	/*
	 * Tracks GC's progress - everything in the range [ZERO_KEY..gc_cur_pos]
	 * has been marked by GC.
	 *
	 * gc_cur_phase is a superset of btree_ids (BTREE_ID_EXTENTS etc.)
	 *
	 * gc_cur_phase == GC_PHASE_DONE indicates that gc is finished/not
	 * currently running, and gc marks are currently valid
	 *
	 * Protected by gc_pos_lock. Only written to by GC thread, so GC thread
	 * can read without a lock.
	 */
	seqcount_t		gc_pos_lock;
	struct gc_pos		gc_pos;

	/*
	 * The allocation code needs gc_mark in struct bucket to be correct, but
	 * it's not while a gc is in progress.
	 */
	struct rw_semaphore	gc_lock;

	/* IO PATH */
	struct bio_set		bio_read;
	struct bio_set		bio_read_split;
	struct bio_set		bio_write;
	struct mutex		bio_bounce_pages_lock;
	mempool_t		bio_bounce_pages;
	struct rhashtable	promote_table;

	mempool_t		compression_bounce[2];
	mempool_t		compress_workspace[BCH_COMPRESSION_NR];
	mempool_t		decompress_workspace;
	ZSTD_parameters		zstd_params;

	struct crypto_shash	*sha256;
	struct crypto_sync_skcipher *chacha20;
	struct crypto_shash	*poly1305;

	atomic64_t		key_version;

	/* REBALANCE */
	struct bch_fs_rebalance	rebalance;

	/* VFS IO PATH - fs-io.c */
	struct bio_set		writepage_bioset;
	struct bio_set		dio_write_bioset;
	struct bio_set		dio_read_bioset;

	struct bio_list		btree_write_error_list;
	struct work_struct	btree_write_error_work;
	spinlock_t		btree_write_error_lock;

	/* ERRORS */
	struct list_head	fsck_errors;
	struct mutex		fsck_error_lock;
	bool			fsck_alloc_err;

	/* FILESYSTEM */
	atomic_long_t		nr_inodes;

	/* QUOTAS */
	struct bch_memquota_type quotas[QTYP_NR];

	/* DEBUG JUNK */
	struct dentry		*debug;
	struct btree_debug	btree_debug[BTREE_ID_NR];
#ifdef CONFIG_BCACHEFS_DEBUG
	struct btree		*verify_data;
	struct btree_node	*verify_ondisk;
	struct mutex		verify_lock;
#endif

	u64			unused_inode_hint;

	/*
	 * A btree node on disk could have too many bsets for an iterator to fit
	 * on the stack - have to dynamically allocate them
	 */
	mempool_t		fill_iter;

	mempool_t		btree_bounce_pool;

	struct journal		journal;

	u64			last_bucket_seq_cleanup;

	/* The rest of this all shows up in sysfs */
	atomic_long_t		read_realloc_races;
	atomic_long_t		extent_migrate_done;
	atomic_long_t		extent_migrate_raced;

	unsigned		btree_gc_periodic:1;
	unsigned		copy_gc_enabled:1;
	bool			promote_whole_extents;

#define BCH_DEBUG_PARAM(name, description) bool name;
	BCH_DEBUG_PARAMS_ALL()
#undef BCH_DEBUG_PARAM

	struct bch2_time_stats	times[BCH_TIME_STAT_NR];
};

static inline void bch2_set_ra_pages(struct bch_fs *c, unsigned ra_pages)
{
#ifndef NO_BCACHEFS_FS
	if (c->vfs_sb)
		c->vfs_sb->s_bdi->ra_pages = ra_pages;
#endif
}

static inline bool bch2_fs_running(struct bch_fs *c)
{
	return c->state == BCH_FS_RO || c->state == BCH_FS_RW;
}

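/* Sectors are 512 bytes, so << 9 converts sectors to bytes: */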
static inline unsigned bucket_bytes(const struct bch_dev *ca)
{
	return ca->mi.bucket_size << 9;
}

static inline unsigned block_bytes(const struct bch_fs *c)
{
	return c->opts.block_size << 9;
}

static inline struct timespec64 bch2_time_to_timespec(struct bch_fs *c, u64 time)
{
	return ns_to_timespec64(time * c->sb.time_precision + c->sb.time_base_lo);
}

static inline s64 timespec_to_bch2_time(struct bch_fs *c, struct timespec64 ts)
{
	s64 ns = timespec64_to_ns(&ts) - c->sb.time_base_lo;

	if (c->sb.time_precision == 1)
		return ns;

	return div_s64(ns, c->sb.time_precision);
}

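/*
 * Worked example of the conversion above: with time_precision = 1000 (i.e.
 * on-disk timestamps in microseconds) and time_base_lo = 0, a timespec of
 * 1.5s = 1500000000 ns becomes 1500000000 / 1000 = 1500000 on disk, and
 * bch2_time_to_timespec() maps it back via 1500000 * 1000 = 1500000000 ns.
 */
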
static inline s64 bch2_current_time(struct bch_fs *c)
{
	struct timespec64 now;

	ktime_get_real_ts64(&now);
	return timespec_to_bch2_time(c, now);
}

#endif /* _BCACHEFS_H */