/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _BCACHEFS_H
#define _BCACHEFS_H

/*
 * SOME HIGH LEVEL CODE DOCUMENTATION:
 *
 * Bcache mostly works with cache sets, cache devices, and backing devices.
 *
 * Support for multiple cache devices hasn't quite been finished off yet, but
 * it's about 95% plumbed through. A cache set and its cache devices are sort of
 * like an md raid array and its component devices. Most of the code doesn't
 * care about individual cache devices; the main abstraction is the cache set.
 *
 * Multiple cache devices are intended to give us the ability to mirror dirty
 * cached data and metadata, without mirroring clean cached data.
 *
 * Backing devices are different, in that they have a lifetime independent of a
 * cache set. When you register a newly formatted backing device it'll come up
 * in passthrough mode, and then you can attach and detach a backing device from
 * a cache set at runtime - while it's mounted and in use. Detaching implicitly
 * invalidates any cached data for that backing device.
 *
 * A cache set can have multiple (many) backing devices attached to it.
 *
 * There's also flash only volumes - this is the reason for the distinction
 * between struct cached_dev and struct bcache_device. A flash only volume
 * works much like a bcache device that has a backing device, except the
 * "cached" data is always dirty. The end result is that we get thin
 * provisioning with very little additional code.
 *
 * Flash only volumes work but they're not production ready because the moving
 * garbage collector needs more work. More on that later.
 *
 * BUCKETS/ALLOCATION:
 *
 * Bcache is primarily designed for caching, which means that in normal
 * operation all of our available space will be allocated. Thus, we need an
 * efficient way of deleting things from the cache so we can write new things to
 * it.
 *
 * To do this, we first divide the cache device up into buckets. A bucket is the
 * unit of allocation; they're typically around 1 MB - anywhere from 128k to 2M+
 * works efficiently.
 *
 * Each bucket has a 16 bit priority, and an 8 bit generation associated with
 * it. The gens and priorities for all the buckets are stored contiguously and
 * packed on disk (in a linked list of buckets - aside from the superblock, all
 * of bcache's metadata is stored in buckets).
 *
 * The priority is used to implement an LRU. We reset a bucket's priority when
 * we allocate it or on a cache hit, and every so often we decrement the
 * priority of each bucket. It could be used to implement something more
 * sophisticated, if anyone ever gets around to it.
 *
 * The generation is used for invalidating buckets. Each pointer also has an 8
 * bit generation embedded in it; for a pointer to be considered valid, its gen
 * must match the gen of the bucket it points into. Thus, to reuse a bucket all
 * we have to do is increment its gen (and write its new gen to disk; we batch
 * this up).
 *
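 * As an illustrative sketch (these aren't the real helpers - the actual
 * check is ptr_stale()), validity is just a generation comparison:
 *
 *	bool ptr_valid(struct bucket *b, u8 ptr_gen)
 *	{
 *		return b->gen == ptr_gen;
 *	}
 *
 *	void invalidate_bucket(struct bucket *b)
 *	{
 *		b->gen++;	// every pointer into b is now stale
 *	}
 *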
 * Bcache is entirely COW - we never write twice to a bucket, even buckets that
 * contain metadata (including btree nodes).
 *
 * THE BTREE:
 *
 * Bcache is in large part designed around the btree.
 *
 * At a high level, the btree is just an index of key -> ptr tuples.
 *
 * Keys represent extents, and thus have a size field. Keys also have a variable
 * number of pointers attached to them (potentially zero, which is handy for
 * invalidating the cache).
 *
 * The key itself is an inode:offset pair. The inode number corresponds to a
 * backing device or a flash only volume. The offset is the ending offset of the
 * extent within the inode - not the starting offset; this makes lookups
 * slightly more convenient.
 *
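 * For example (a sketch with illustrative field names; see bcachefs_format.h
 * for the real key layout): since keys store the ending offset, finding the
 * extent containing sector s means searching for the first key with
 * offset > s, and that key covers s iff s falls within its size:
 *
 *	bool key_covers(u64 offset, u32 size, u64 s)
 *	{
 *		return s >= offset - size && s < offset;
 *	}
 *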
 * Pointers contain the cache device id, the offset on that device, and an 8 bit
 * generation number. More on the gen later.
 *
 * Index lookups are not fully abstracted - cache lookups in particular are
 * still somewhat mixed in with the btree code, but things are headed in that
 * direction.
 *
 * Updates are fairly well abstracted, though. There are two different ways of
 * updating the btree; insert and replace.
 *
 * BTREE_INSERT will just take a list of keys and insert them into the btree -
 * overwriting (possibly only partially) any extents they overlap with. This is
 * used to update the index after a write.
 *
 * BTREE_REPLACE is really cmpxchg(); it inserts a key into the btree iff it is
 * overwriting a key that matches another given key. This is used for inserting
 * data into the cache after a cache miss, for background writeback, and for
 * the moving garbage collector.
 *
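 * In pseudocode (a sketch - the real interface works on keylists and btree
 * iterators), the two operations differ only in their precondition:
 *
 *	BTREE_INSERT:	insert(index, new);
 *
 *	BTREE_REPLACE:	if (index still contains old)
 *				insert(index, new);
 *			else
 *				fail;	// raced with a conflicting update
 *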
 * There is no "delete" operation; deleting things from the index is
 * accomplished either by invalidating pointers (by incrementing a bucket's
 * gen) or by inserting a key with 0 pointers - which will overwrite anything
 * previously present at that location in the index.
 *
 * This means that there are always stale/invalid keys in the btree. They're
 * filtered out by the code that iterates through a btree node, and removed when
 * a btree node is rewritten.
 *
 * BTREE NODES:
 *
 * Our unit of allocation is a bucket, and we can't arbitrarily allocate and
 * free smaller than a bucket - so, that's how big our btree nodes are.
 *
 * (If buckets are really big we'll only use part of the bucket for a btree node
 * - no less than 1/4th - but a bucket still contains no more than a single
 * btree node. I'd actually like to change this, but for now we rely on the
 * bucket's gen for deleting btree nodes when we rewrite/split a node.)
 *
 * Anyways, btree nodes are big - big enough to be inefficient with a textbook
 * btree implementation.
 *
 * The way this is solved is that btree nodes are internally log structured; we
 * can append new keys to an existing btree node without rewriting it. This
 * means each set of keys we write is sorted, but the node is not.
 *
 * We maintain this log structure in memory - keeping 1 MB of keys sorted would
 * be expensive, and we have to distinguish between the keys we have written and
 * the keys we haven't. So to do a lookup in a btree node, we have to search
 * each sorted set. But we do merge written sets together lazily, so the cost of
 * these extra searches is quite low (normally most of the keys in a btree node
 * will be in one big set, and then there'll be one or two sets that are much
 * smaller).
 *
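 * So a lookup within a single node is roughly (illustrative only):
 *
 *	for (each sorted set in the node - there are usually only a few)
 *		candidate = binary search the set for the search key;
 *	return the best candidate across all sets;
 *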
 * This log structure makes bcache's btree more of a hybrid between a
 * conventional btree and a compacting data structure, with some of the
 * advantages of both.
 *
 * GARBAGE COLLECTION:
 *
 * We can't just invalidate any bucket - it might contain dirty data or
 * metadata. If it once contained dirty data, other writes might overwrite it
 * later, leaving no valid pointers into that bucket in the index.
 *
 * Thus, the primary purpose of garbage collection is to find buckets to reuse.
 * It also counts how much valid data each bucket currently contains, so that
 * allocation can reuse buckets sooner when they've been mostly overwritten.
 *
 * It also does some things that are really internal to the btree
 * implementation. If a btree node contains pointers that are stale by more than
 * some threshold, it rewrites the btree node to avoid the bucket's generation
 * wrapping around. It also merges adjacent btree nodes if they're empty enough.
 *
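 * Since gens are 8 bits, staleness is computed with wrapping arithmetic,
 * roughly (names illustrative):
 *
 *	u8 stale = (u8) (bucket_gen - ptr_gen);
 *	if (stale > max_stale_threshold)
 *		rewrite the node, so that bucket_gen can never wrap all the
 *		way around and make a stale pointer look valid again;
 *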
 * THE JOURNAL:
 *
 * Bcache's journal is not necessary for consistency; we always strictly
 * order metadata writes so that the btree and everything else is consistent on
 * disk in the event of an unclean shutdown, and in fact bcache had writeback
 * caching (with recovery from unclean shutdown) before journalling was
 * implemented.
 *
 * Rather, the journal is purely a performance optimization; we can't complete a
 * write until we've updated the index on disk, otherwise the cache would be
 * inconsistent in the event of an unclean shutdown. This means that without the
 * journal, on random write workloads we constantly have to update all the leaf
 * nodes in the btree, and those writes will be mostly empty (appending at most
 * a few keys each) - highly inefficient in terms of the amount of metadata
 * written, and it puts more strain on the various btree resorting/compacting
 * code.
 *
 * The journal is just a log of keys we've inserted; on startup we just reinsert
 * all the keys in the open journal entries. That means that when we're updating
 * a node in the btree, we can wait until a 4k block of keys fills up before
 * writing them out.
 *
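 * Replay is conceptually just (a sketch, not the actual replay code):
 *
 *	for (each open journal entry, oldest to newest)
 *		for (each key in the entry)
 *			reinsert the key into the btree;
 *
 * Reinserting a key whose update already made it to the btree is harmless,
 * since inserts simply overwrite whatever is present at that position.
 *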
 * For simplicity, we only journal updates to leaf nodes; updates to parent
 * nodes are rare enough (since our leaf nodes are huge) that it wasn't worth
 * the complexity to deal with journalling them (in particular, journal replay)
 * - updates to non leaf nodes just happen synchronously (see btree_split()).
 */

#undef pr_fmt
#define pr_fmt(fmt) "bcachefs: %s() " fmt "\n", __func__

#include <linux/backing-dev-defs.h>
#include <linux/bug.h>
#include <linux/bio.h>
#include <linux/closure.h>
#include <linux/kobject.h>
#include <linux/list.h>
#include <linux/math64.h>
#include <linux/mutex.h>
#include <linux/percpu-refcount.h>
#include <linux/percpu-rwsem.h>
#include <linux/rhashtable.h>
#include <linux/rwsem.h>
#include <linux/seqlock.h>
#include <linux/shrinker.h>
#include <linux/types.h>
#include <linux/workqueue.h>
#include <linux/zstd.h>

#include "bcachefs_format.h"
#include "fifo.h"
#include "opts.h"
#include "util.h"

#define dynamic_fault(...)	0
#define race_fault(...)		0

#define bch2_fs_init_fault(name)				\
	dynamic_fault("bcachefs:bch_fs_init:" name)
#define bch2_meta_read_fault(name)				\
	dynamic_fault("bcachefs:meta:read:" name)
#define bch2_meta_write_fault(name)				\
	dynamic_fault("bcachefs:meta:write:" name)

#ifdef __KERNEL__
#define bch2_fmt(_c, fmt)	"bcachefs (%s): " fmt "\n", ((_c)->name)
#else
#define bch2_fmt(_c, fmt)	fmt "\n"
#endif

#define bch_info(c, fmt, ...) \
	printk(KERN_INFO bch2_fmt(c, fmt), ##__VA_ARGS__)
#define bch_notice(c, fmt, ...) \
	printk(KERN_NOTICE bch2_fmt(c, fmt), ##__VA_ARGS__)
#define bch_warn(c, fmt, ...) \
	printk(KERN_WARNING bch2_fmt(c, fmt), ##__VA_ARGS__)
#define bch_warn_ratelimited(c, fmt, ...) \
	printk_ratelimited(KERN_WARNING bch2_fmt(c, fmt), ##__VA_ARGS__)
#define bch_err(c, fmt, ...) \
	printk(KERN_ERR bch2_fmt(c, fmt), ##__VA_ARGS__)
#define bch_err_ratelimited(c, fmt, ...) \
	printk_ratelimited(KERN_ERR bch2_fmt(c, fmt), ##__VA_ARGS__)

#define bch_verbose(c, fmt, ...)			\
do {							\
	if ((c)->opts.verbose)				\
		bch_info(c, fmt, ##__VA_ARGS__);	\
} while (0)

#define pr_verbose_init(opts, fmt, ...)			\
do {							\
	if (opt_get(opts, verbose))			\
		pr_info(fmt, ##__VA_ARGS__);		\
} while (0)

/* Parameters that are useful for debugging, but should always be compiled in: */
#define BCH_DEBUG_PARAMS_ALWAYS()					\
	BCH_DEBUG_PARAM(key_merging_disabled,				\
		"Disables merging of extents")				\
	BCH_DEBUG_PARAM(btree_gc_always_rewrite,			\
		"Causes mark and sweep to compact and rewrite every "	\
		"btree node it traverses")				\
	BCH_DEBUG_PARAM(btree_gc_rewrite_disabled,			\
		"Disables rewriting of btree nodes during mark and sweep") \
	BCH_DEBUG_PARAM(btree_shrinker_disabled,			\
		"Disables the shrinker callback for the btree node cache")

/* Parameters that should only be compiled in in debug mode: */
#define BCH_DEBUG_PARAMS_DEBUG()					\
	BCH_DEBUG_PARAM(expensive_debug_checks,				\
		"Enables various runtime debugging checks that "	\
		"significantly affect performance")			\
	BCH_DEBUG_PARAM(debug_check_iterators,				\
		"Enables extra verification for btree iterators")	\
	BCH_DEBUG_PARAM(debug_check_bkeys,				\
		"Run bkey_debugcheck (primarily checking GC/allocation "\
		"information) when iterating over keys")		\
	BCH_DEBUG_PARAM(verify_btree_ondisk,				\
		"Reread btree nodes at various points to verify the "	\
		"mergesort in the read path against modifications "	\
		"done in memory")					\
	BCH_DEBUG_PARAM(journal_seq_verify,				\
		"Store the journal sequence number in the version "	\
		"number of every btree key, and verify that btree "	\
		"update ordering is preserved during recovery")		\
	BCH_DEBUG_PARAM(inject_invalid_keys,				\
		"Store the journal sequence number in the version "	\
		"number of every btree key, and verify that btree "	\
		"update ordering is preserved during recovery")	\
	BCH_DEBUG_PARAM(test_alloc_startup,				\
		"Force allocator startup to use the slowpath where it "	\
		"can't find enough free buckets without invalidating "	\
		"cached data")						\
	BCH_DEBUG_PARAM(force_reconstruct_read,				\
		"Force reads to use the reconstruct path, when reading "\
		"from erasure coded extents")				\
	BCH_DEBUG_PARAM(test_restart_gc,				\
		"Test restarting mark and sweep gc when bucket gens change")\
	BCH_DEBUG_PARAM(test_reconstruct_alloc,				\
		"Test reconstructing the alloc btree")

#define BCH_DEBUG_PARAMS_ALL() BCH_DEBUG_PARAMS_ALWAYS() BCH_DEBUG_PARAMS_DEBUG()

#ifdef CONFIG_BCACHEFS_DEBUG
#define BCH_DEBUG_PARAMS() BCH_DEBUG_PARAMS_ALL()
#else
#define BCH_DEBUG_PARAMS() BCH_DEBUG_PARAMS_ALWAYS()
#endif

#define BCH_TIME_STATS()			\
	x(btree_node_mem_alloc)			\
	x(btree_node_split)			\
	x(btree_node_sort)			\
	x(btree_node_read)			\
	x(btree_gc)				\
	x(btree_update)				\
	x(btree_lock_contended_read)		\
	x(btree_lock_contended_intent)		\
	x(btree_lock_contended_write)		\
	x(data_write)				\
	x(data_read)				\
	x(data_promote)				\
	x(journal_write)			\
	x(journal_delay)			\
	x(journal_flush_seq)			\
	x(blocked_journal)			\
	x(blocked_allocate)			\
	x(blocked_allocate_open_bucket)

enum bch_time_stats {
#define x(name) BCH_TIME_##name,
	BCH_TIME_STATS()
#undef x
	BCH_TIME_STAT_NR
};
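
/*
 * BCH_TIME_STATS() is an x-macro: the enum above expands it to
 * BCH_TIME_btree_node_mem_alloc, BCH_TIME_btree_node_split, ..., leaving
 * BCH_TIME_STAT_NR as the number of entries - used to size struct bch_fs's
 * times[] array below.
 */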

#include "alloc_types.h"
#include "btree_types.h"
#include "buckets_types.h"
#include "clock_types.h"
#include "ec_types.h"
#include "journal_types.h"
#include "keylist_types.h"
#include "quota_types.h"
#include "rebalance_types.h"
#include "replicas_types.h"
#include "super_types.h"

/* Number of nodes btree coalesce will try to coalesce at once */
#define GC_MERGE_NODES		4U

/* Maximum number of nodes we might need to allocate atomically: */
#define BTREE_RESERVE_MAX	(BTREE_MAX_DEPTH + (BTREE_MAX_DEPTH - 1))

/* Size of the freelist we allocate btree nodes from: */
#define BTREE_NODE_RESERVE	BTREE_RESERVE_MAX

#define BTREE_NODE_OPEN_BUCKET_RESERVE	(BTREE_RESERVE_MAX * BCH_REPLICAS_MAX)

struct btree;

enum gc_phase {
	GC_PHASE_NOT_RUNNING,
	GC_PHASE_START,
	GC_PHASE_SB,

	GC_PHASE_BTREE_EC,
	GC_PHASE_BTREE_EXTENTS,
	GC_PHASE_BTREE_INODES,
	GC_PHASE_BTREE_DIRENTS,
	GC_PHASE_BTREE_XATTRS,
	GC_PHASE_BTREE_ALLOC,
	GC_PHASE_BTREE_QUOTAS,

	GC_PHASE_PENDING_DELETE,
	GC_PHASE_ALLOC,
};

struct gc_pos {
	enum gc_phase		phase;
	struct bpos		pos;
	unsigned		level;
};

struct io_count {
	u64			sectors[2][BCH_DATA_NR];
};

struct bch_dev {
	struct kobject		kobj;
	struct percpu_ref	ref;
	struct completion	ref_completion;
	struct percpu_ref	io_ref;
	struct completion	io_ref_completion;

	struct bch_fs		*fs;

	u8			dev_idx;
	/*
	 * Cached version of this device's member info from superblock
	 * Committed by bch2_write_super() -> bch_fs_mi_update()
	 */
	struct bch_member_cpu	mi;
	__uuid_t		uuid;
	char			name[BDEVNAME_SIZE];

	struct bch_sb_handle	disk_sb;
	struct bch_sb		*sb_read_scratch;
	int			sb_write_error;

	struct bch_devs_mask	self;

	/* biosets used in cloned bios for writing multiple replicas */
	struct bio_set		replica_set;

	/*
	 * Buckets:
	 * Per-bucket arrays are protected by c->mark_lock, bucket_lock and
	 * gc_lock, for device resize - holding any is sufficient for access:
	 * Or rcu_read_lock(), but only for ptr_stale():
	 */
	struct bucket_array __rcu *buckets[2];
	unsigned long		*buckets_nouse;
	unsigned long		*buckets_written;
	struct rw_semaphore	bucket_lock;

	struct bch_dev_usage __percpu *usage[2];

	/* Allocator: */
	struct task_struct __rcu *alloc_thread;

	/*
	 * free: Buckets that are ready to be used
	 *
	 * free_inc: Incoming buckets - these are buckets that currently have
	 * cached data in them, and we can't reuse them until after we write
	 * their new gen to disk. After prio_write() finishes writing the new
	 * gens/prios, they'll be moved to the free list (and possibly discarded
	 * in the process)
	 */
	alloc_fifo		free[RESERVE_NR];
	alloc_fifo		free_inc;
	spinlock_t		freelist_lock;

	u8			open_buckets_partial[OPEN_BUCKETS_COUNT];
	unsigned		open_buckets_partial_nr;

	size_t			fifo_last_bucket;

	/* last calculated minimum prio */
	u16			max_last_bucket_io[2];

	size_t			inc_gen_needs_gc;
	size_t			inc_gen_really_needs_gc;

	/*
	 * XXX: this should be an enum for allocator state, so as to include
	 * error state
	 */
	bool			allocator_blocked;
	bool			allocator_blocked_full;

	alloc_heap		alloc_heap;

	/* Copying GC: */
	struct task_struct	*copygc_thread;
	copygc_heap		copygc_heap;
	struct bch_pd_controller copygc_pd;
	struct write_point	copygc_write_point;
	u64			copygc_threshold;

	atomic64_t		rebalance_work;

	struct journal_device	journal;

	struct work_struct	io_error_work;

	/* The rest of this all shows up in sysfs */
	atomic64_t		cur_latency[2];
	struct bch2_time_stats	io_latency[2];

#define CONGESTED_MAX		1024
	atomic_t		congested;
	u64			congested_last;

	struct io_count __percpu *io_done;
};

enum {
	/* startup: */
	BCH_FS_ALLOC_READ_DONE,
	BCH_FS_ALLOCATOR_STARTED,
	BCH_FS_ALLOCATOR_RUNNING,
	BCH_FS_INITIAL_GC_DONE,
	BCH_FS_FSCK_DONE,
	BCH_FS_STARTED,
	BCH_FS_RW,

	/* shutdown: */
	BCH_FS_STOPPING,
	BCH_FS_EMERGENCY_RO,
	BCH_FS_WRITE_DISABLE_COMPLETE,

	/* errors: */
	BCH_FS_ERROR,
	BCH_FS_ERRORS_FIXED,

	/* misc: */
	BCH_FS_BDEV_MOUNTED,
	BCH_FS_FIXED_GENS,
	BCH_FS_REBUILD_REPLICAS,
	BCH_FS_HOLD_BTREE_WRITES,
};

struct btree_debug {
	unsigned		id;
	struct dentry		*btree;
	struct dentry		*btree_format;
	struct dentry		*failed;
};

struct bch_fs_pcpu {
	u64			sectors_available;
};

struct journal_seq_blacklist_table {
	size_t			nr;
	struct journal_seq_blacklist_table_entry {
		u64		start;
		u64		end;
		bool		dirty;
	}			entries[0];
};

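/*
 * entries[0] is a flexible array member: the table is allocated with space
 * for all of its entries in a single allocation, e.g. (illustrative):
 *
 *	t = kzalloc(sizeof(*t) + nr * sizeof(t->entries[0]), GFP_KERNEL);
 */
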
struct bch_fs {
	struct closure		cl;

	struct list_head	list;
	struct kobject		kobj;
	struct kobject		internal;
	struct kobject		opts_dir;
	struct kobject		time_stats;
	unsigned long		flags;

	int			minor;
	struct device		*chardev;
	struct super_block	*vfs_sb;
	char			name[40];

	/* ro/rw, add/remove devices: */
	struct mutex		state_lock;

	/* Counts outstanding writes, for clean transition to read-only */
	struct percpu_ref	writes;
	struct work_struct	read_only_work;

	struct bch_dev __rcu	*devs[BCH_SB_MEMBERS_MAX];

	struct bch_replicas_cpu replicas;
	struct bch_replicas_cpu replicas_gc;
	struct mutex		replicas_gc_lock;

	struct journal_entry_res replicas_journal_res;

	struct bch_disk_groups_cpu __rcu *disk_groups;

	struct bch_opts		opts;

	/* Updated by bch2_sb_update():*/
	struct {
		__uuid_t	uuid;
		__uuid_t	user_uuid;

		u16		version;
		u16		encoded_extent_max;

		u8		nr_devices;
		u8		clean;

		u8		encryption_type;

		u64		time_base_lo;
		u32		time_base_hi;
		u32		time_precision;
		u64		features;
		u64		compat;
	}			sb;

	struct bch_sb_handle	disk_sb;

	unsigned short		block_bits;	/* ilog2(block_size) */

	u16			btree_foreground_merge_threshold;

	struct closure		sb_write;
	struct mutex		sb_lock;

	/* BTREE CACHE */
	struct bio_set		btree_bio;

	struct btree_root	btree_roots[BTREE_ID_NR];
	bool			btree_roots_dirty;
	struct mutex		btree_root_lock;

	struct btree_cache	btree_cache;

	mempool_t		btree_reserve_pool;

	/*
	 * Cache of allocated btree nodes - if we allocate a btree node and
	 * don't use it, if we free it that space can't be reused until going
	 * _all_ the way through the allocator (which exposes us to a livelock
	 * when allocating btree reserves fail halfway through) - instead, we
	 * can stick them here:
	 */
	struct btree_alloc	btree_reserve_cache[BTREE_NODE_RESERVE * 2];
	unsigned		btree_reserve_cache_nr;
	struct mutex		btree_reserve_cache_lock;

	mempool_t		btree_interior_update_pool;
	struct list_head	btree_interior_update_list;
	struct mutex		btree_interior_update_lock;
	struct closure_waitlist	btree_interior_update_wait;

	mempool_t		btree_iters_pool;

	struct workqueue_struct	*wq;
	/* copygc needs its own workqueue for index updates.. */
	struct workqueue_struct	*copygc_wq;
	struct workqueue_struct	*journal_reclaim_wq;

	/* ALLOCATION */
	struct delayed_work	pd_controllers_update;
	unsigned		pd_controllers_update_seconds;

	struct bch_devs_mask	rw_devs[BCH_DATA_NR];

	u64			capacity; /* sectors */

	/*
	 * When capacity _decreases_ (due to a disk being removed), we
	 * increment capacity_gen - this invalidates outstanding reservations
	 * and forces them to be revalidated
	 */
	u32			capacity_gen;
	unsigned		bucket_size_max;

	atomic64_t		sectors_available;

	struct bch_fs_pcpu __percpu *pcpu;

	struct percpu_rw_semaphore mark_lock;

	struct bch_fs_usage __percpu *usage[2];

	/* single element mempool: */
	struct mutex		usage_scratch_lock;
	struct bch_fs_usage	*usage_scratch;

	/*
	 * When we invalidate buckets, we use both the priority and the amount
	 * of good data to determine which buckets to reuse first - to weight
	 * those together consistently we keep track of the smallest nonzero
	 * priority of any bucket.
	 */
	struct bucket_clock	bucket_clock[2];

	struct io_clock		io_clock[2];

	/* JOURNAL SEQ BLACKLIST */
	struct journal_seq_blacklist_table *
				journal_seq_blacklist_table;
	struct work_struct	journal_seq_blacklist_gc_work;

	/* ALLOCATOR */
	spinlock_t		freelist_lock;
	struct closure_waitlist	freelist_wait;
	u64			blocked_allocate;
	u64			blocked_allocate_open_bucket;
	u8			open_buckets_freelist;
	u8			open_buckets_nr_free;
	struct closure_waitlist	open_buckets_wait;
	struct open_bucket	open_buckets[OPEN_BUCKETS_COUNT];

	struct write_point	btree_write_point;
	struct write_point	rebalance_write_point;

	struct write_point	write_points[WRITE_POINT_MAX];
	struct hlist_head	write_points_hash[WRITE_POINT_HASH_NR];
	struct mutex		write_points_hash_lock;
	unsigned		write_points_nr;

	/* GARBAGE COLLECTION */
	struct task_struct	*gc_thread;
	atomic_t		kick_gc;
	unsigned long		gc_count;

	/*
	 * Tracks GC's progress - everything in the range [ZERO_KEY..gc_cur_pos]
	 * has been marked by GC.
	 *
	 * gc_cur_phase is a superset of btree_ids (BTREE_ID_EXTENTS etc.)
	 *
	 * Protected by gc_pos_lock. Only written to by GC thread, so GC thread
	 * can read without a lock.
	 */
	seqcount_t		gc_pos_lock;
	struct gc_pos		gc_pos;

	/*
	 * The allocation code needs gc_mark in struct bucket to be correct, but
	 * it's not while a gc is in progress.
	 */
	struct rw_semaphore	gc_lock;

	/* IO PATH */
	struct bio_set		bio_read;
	struct bio_set		bio_read_split;
	struct bio_set		bio_write;
	struct mutex		bio_bounce_pages_lock;
	mempool_t		bio_bounce_pages;
	struct rhashtable	promote_table;

	mempool_t		compression_bounce[2];
	mempool_t		compress_workspace[BCH_COMPRESSION_NR];
	mempool_t		decompress_workspace;
	ZSTD_parameters		zstd_params;

	struct crypto_shash	*sha256;
	struct crypto_sync_skcipher *chacha20;
	struct crypto_shash	*poly1305;

	atomic64_t		key_version;

	/* REBALANCE */
	struct bch_fs_rebalance	rebalance;

	/* STRIPES: */
	GENRADIX(struct stripe)	stripes[2];
	struct mutex		ec_stripe_create_lock;

	ec_stripes_heap		ec_stripes_heap;
	spinlock_t		ec_stripes_heap_lock;

	/* ERASURE CODING */
	struct list_head	ec_new_stripe_list;
	struct mutex		ec_new_stripe_lock;

	struct bio_set		ec_bioset;

	struct work_struct	ec_stripe_delete_work;
	struct llist_head	ec_stripe_delete_list;

	/* VFS IO PATH - fs-io.c */
	struct bio_set		writepage_bioset;
	struct bio_set		dio_write_bioset;
	struct bio_set		dio_read_bioset;

	struct bio_list		btree_write_error_list;
	struct work_struct	btree_write_error_work;
	spinlock_t		btree_write_error_lock;

	/* ERRORS */
	struct list_head	fsck_errors;
	struct mutex		fsck_error_lock;
	bool			fsck_alloc_err;

	/* QUOTAS */
	struct bch_memquota_type quotas[QTYP_NR];

	/* DEBUG JUNK */
	struct dentry		*debug;
	struct btree_debug	btree_debug[BTREE_ID_NR];
#ifdef CONFIG_BCACHEFS_DEBUG
	struct btree		*verify_data;
	struct btree_node	*verify_ondisk;
	struct mutex		verify_lock;
#endif

	u64			unused_inode_hint;

	/*
	 * A btree node on disk could have too many bsets for an iterator to fit
	 * on the stack - have to dynamically allocate them
	 */
	mempool_t		fill_iter;

	mempool_t		btree_bounce_pool;

	struct journal		journal;

	u64			last_bucket_seq_cleanup;

	/* The rest of this all shows up in sysfs */
	atomic_long_t		read_realloc_races;
	atomic_long_t		extent_migrate_done;
	atomic_long_t		extent_migrate_raced;

	unsigned		btree_gc_periodic:1;
	unsigned		copy_gc_enabled:1;
	bool			promote_whole_extents;

#define BCH_DEBUG_PARAM(name, description) bool name;
	BCH_DEBUG_PARAMS_ALL()
#undef BCH_DEBUG_PARAM

	struct bch2_time_stats	times[BCH_TIME_STAT_NR];
};

static inline void bch2_set_ra_pages(struct bch_fs *c, unsigned ra_pages)
{
#ifndef NO_BCACHEFS_FS
	if (c->vfs_sb)
		c->vfs_sb->s_bdi->ra_pages = ra_pages;
#endif
}

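/* On-disk sizes are in 512-byte sectors, hence the << 9 in these helpers: */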
static inline unsigned bucket_bytes(const struct bch_dev *ca)
{
	return ca->mi.bucket_size << 9;
}

static inline unsigned block_bytes(const struct bch_fs *c)
{
	return c->opts.block_size << 9;
}

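/*
 * On-disk timestamps are kept in units of c->sb.time_precision nanoseconds,
 * offset by c->sb.time_base_lo; these helpers convert to and from the
 * kernel's timespec64 representation:
 */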
static inline struct timespec64 bch2_time_to_timespec(struct bch_fs *c, u64 time)
{
	return ns_to_timespec64(time * c->sb.time_precision + c->sb.time_base_lo);
}

static inline s64 timespec_to_bch2_time(struct bch_fs *c, struct timespec64 ts)
{
	s64 ns = timespec64_to_ns(&ts) - c->sb.time_base_lo;

	if (c->sb.time_precision == 1)
		return ns;

	return div_s64(ns, c->sb.time_precision);
}

static inline s64 bch2_current_time(struct bch_fs *c)
{
	struct timespec64 now;

	ktime_get_real_ts64(&now);
	return timespec_to_bch2_time(c, now);
}

#endif /* _BCACHEFS_H */