/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _BCACHE_H
#define _BCACHE_H

/*
 * SOME HIGH LEVEL CODE DOCUMENTATION:
 *
 * Bcache mostly works with cache sets, cache devices, and backing devices.
 *
 * Support for multiple cache devices hasn't quite been finished off yet, but
 * it's about 95% plumbed through. A cache set and its cache devices is sort of
 * like an md raid array and its component devices. Most of the code doesn't
 * care about individual cache devices; the main abstraction is the cache set.
 *
 * Multiple cache devices are intended to give us the ability to mirror dirty
 * cached data and metadata, without mirroring clean cached data.
 *
 * Backing devices are different, in that they have a lifetime independent of a
 * cache set. When you register a newly formatted backing device it'll come up
 * in passthrough mode, and then you can attach and detach a backing device from
 * a cache set at runtime - while it's mounted and in use. Detaching implicitly
 * invalidates any cached data for that backing device.
 *
 * A cache set can have multiple (many) backing devices attached to it.
 *
 * There's also flash only volumes - this is the reason for the distinction
 * between struct cached_dev and struct bcache_device. A flash only volume
 * works much like a bcache device that has a backing device, except the
 * "cached" data is always dirty. The end result is that we get thin
 * provisioning with very little additional code.
 *
 * Flash only volumes work, but they're not production ready because the moving
 * garbage collector needs more work. More on that later.
 *
 * BUCKETS/ALLOCATION:
 *
 * Bcache is primarily designed for caching, which means that in normal
 * operation all of our available space will be allocated. Thus, we need an
 * efficient way of deleting things from the cache so we can write new things
 * to it.
 *
 * To do this, we first divide the cache device up into buckets. A bucket is the
 * unit of allocation; they're typically around 1 MB - anywhere from 128k to 2M+
 * works efficiently.
 *
 * Each bucket has a 16 bit priority, and an 8 bit generation associated with
 * it. The gens and priorities for all the buckets are stored contiguously and
 * packed on disk (in a linked list of buckets - aside from the superblock, all
 * of bcache's metadata is stored in buckets).
 *
 * The priority is used to implement an LRU. We reset a bucket's priority when
 * we allocate it or on a cache hit, and every so often we decrement the
 * priority of each bucket. It could be used to implement something more
 * sophisticated, if anyone ever gets around to it.
 *
 * The generation is used for invalidating buckets. Each pointer also has an 8
 * bit generation embedded in it; for a pointer to be considered valid, its gen
 * must match the gen of the bucket it points into. Thus, to reuse a bucket all
 * we have to do is increment its gen (and write its new gen to disk; we batch
 * this up).
 *
 * Bcache is entirely COW - we never write twice to a bucket, even buckets that
 * contain metadata (including btree nodes).
 *
 * THE BTREE:
 *
 * Bcache is in large part designed around the btree.
 *
 * At a high level, the btree is just an index of key -> ptr tuples.
 *
 * Keys represent extents, and thus have a size field. Keys also have a variable
 * number of pointers attached to them (potentially zero, which is handy for
 * invalidating the cache).
 *
 * The key itself is an inode:offset pair. The inode number corresponds to a
 * backing device or a flash only volume. The offset is the ending offset of the
 * extent within the inode - not the starting offset; this makes lookups
 * slightly more convenient.
 *
 * Pointers contain the cache device id, the offset on that device, and an 8 bit
 * generation number. More on the gen later.
 *
 * Index lookups are not fully abstracted - cache lookups in particular are
 * still somewhat mixed in with the btree code, but things are headed in that
 * direction.
 *
 * Updates are fairly well abstracted, though. There are two different ways of
 * updating the btree: insert and replace.
 *
 * BTREE_INSERT will just take a list of keys and insert them into the btree -
 * overwriting (possibly only partially) any extents they overlap with. This is
 * used to update the index after a write.
 *
 * BTREE_REPLACE is really cmpxchg(); it inserts a key into the btree iff it is
 * overwriting a key that matches another given key. This is used for inserting
 * data into the cache after a cache miss, and for background writeback, and for
 * the moving garbage collector.
 *
 * There is no "delete" operation; deleting things from the index is
 * accomplished either by invalidating pointers (by incrementing a bucket's
 * gen) or by inserting a key with 0 pointers - which will overwrite anything
 * previously present at that location in the index.
 *
 * This means that there are always stale/invalid keys in the btree. They're
 * filtered out by the code that iterates through a btree node, and removed when
 * a btree node is rewritten.
 *
 * BTREE NODES:
 *
 * Our unit of allocation is a bucket, and we can't arbitrarily allocate and
 * free smaller than a bucket - so, that's how big our btree nodes are.
 *
 * (If buckets are really big we'll only use part of the bucket for a btree node
 * - no less than 1/4th - but a bucket still contains no more than a single
 * btree node. I'd actually like to change this, but for now we rely on the
 * bucket's gen for deleting btree nodes when we rewrite/split a node.)
 *
 * Anyways, btree nodes are big - big enough to be inefficient with a textbook
 * btree implementation.
 *
 * The way this is solved is that btree nodes are internally log structured; we
 * can append new keys to an existing btree node without rewriting it. This
 * means each set of keys we write is sorted, but the node is not.
 *
 * We maintain this log structure in memory - keeping 1MB of keys sorted would
 * be expensive, and we have to distinguish between the keys we have written and
 * the keys we haven't. So to do a lookup in a btree node, we have to search
 * each sorted set. But we do merge written sets together lazily, so the cost of
 * these extra searches is quite low (normally most of the keys in a btree node
 * will be in one big set, and then there'll be one or two sets that are much
 * smaller).
 *
 * This log structure makes bcache's btree more of a hybrid between a
 * conventional btree and a compacting data structure, with some of the
 * advantages of both.
 *
 * GARBAGE COLLECTION:
 *
 * We can't just invalidate any bucket - it might contain dirty data or
 * metadata. If it once contained dirty data, other writes might overwrite it
 * later, leaving no valid pointers into that bucket in the index.
 *
 * Thus, the primary purpose of garbage collection is to find buckets to reuse.
 * It also counts how much valid data each bucket currently contains, so that
 * allocation can reuse buckets sooner when they've been mostly overwritten.
 *
 * It also does some things that are really internal to the btree
 * implementation. If a btree node contains pointers that are stale by more than
 * some threshold, it rewrites the btree node to avoid the bucket's generation
 * wrapping around. It also merges adjacent btree nodes if they're empty enough.
 *
 * THE JOURNAL:
 *
 * Bcache's journal is not necessary for consistency; we always strictly
 * order metadata writes so that the btree and everything else is consistent on
 * disk in the event of an unclean shutdown, and in fact bcache had writeback
 * caching (with recovery from unclean shutdown) before journalling was
 * implemented.
 *
 * Rather, the journal is purely a performance optimization; we can't complete a
 * write until we've updated the index on disk, otherwise the cache would be
 * inconsistent in the event of an unclean shutdown. This means that without the
 * journal, on random write workloads we constantly have to update all the leaf
 * nodes in the btree, and those writes will be mostly empty (appending at most
 * a few keys each) - highly inefficient in terms of the amount of metadata
 * written, and it puts more strain on the various btree resorting/compacting
 * code.
 *
 * The journal is just a log of keys we've inserted; on startup we just reinsert
 * all the keys in the open journal entries. That means that when we're updating
 * a node in the btree, we can wait until a 4k block of keys fills up before
 * writing them out.
 *
 * For simplicity, we only journal updates to leaf nodes; updates to parent
 * nodes are rare enough (since our leaf nodes are huge) that it wasn't worth
 * the complexity to deal with journalling them (in particular, journal replay)
 * - updates to non leaf nodes just happen synchronously (see btree_split()).
 */
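
/*
 * Illustrative sketch of the extent key convention described above, assuming
 * the KEY(inode, offset, size) initializer from bcache_ondisk.h: a 16-sector
 * extent covering sectors 1024..1039 of inode 5 would be indexed as
 * KEY(5, 1040, 16) - the offset field stores the end (1024 + 16), and the
 * start is recovered as offset - size. A lookup for any sector in that range
 * then only needs to find the first key whose offset is greater than the
 * sector being searched for.
 */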

#define pr_fmt(fmt) "bcache: %s() " fmt, __func__

#include <linux/bio.h>
#include <linux/closure.h>
#include <linux/kobject.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/rbtree.h>
#include <linux/rwsem.h>
#include <linux/refcount.h>
#include <linux/types.h>
#include <linux/workqueue.h>
#include <linux/kthread.h>

#include "bcache_ondisk.h"
#include "bset.h"
#include "util.h"

struct bucket {
	atomic_t	pin;
	uint16_t	prio;
	uint8_t		gen;
	uint8_t		last_gc; /* Most out of date gen in the btree */
	uint16_t	gc_mark; /* Bitfield used by GC. See below for field */
	uint16_t	reclaimable_in_gc:1;
};

/*
 * I'd use bitfields for these, but I don't trust the compiler not to screw me
 * as multiple threads touch struct bucket without locking
 */

BITMASK(GC_MARK, struct bucket, gc_mark, 0, 2);
#define GC_MARK_RECLAIMABLE	1
#define GC_MARK_DIRTY		2
#define GC_MARK_METADATA	3
#define GC_SECTORS_USED_SIZE	13
#define MAX_GC_SECTORS_USED	(~(~0ULL << GC_SECTORS_USED_SIZE))
BITMASK(GC_SECTORS_USED, struct bucket, gc_mark, 2, GC_SECTORS_USED_SIZE);
BITMASK(GC_MOVE, struct bucket, gc_mark, 15, 1);
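
/*
 * For reference, the gc_mark layout implied by the BITMASK() calls above
 * (16 bits total):
 *
 *   bits  0-1   GC_MARK          (GC_MARK_RECLAIMABLE/DIRTY/METADATA)
 *   bits  2-14  GC_SECTORS_USED  (13 bits, at most MAX_GC_SECTORS_USED)
 *   bit   15    GC_MOVE
 */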

#include "journal.h"
#include "stats.h"
struct search;
struct btree;
struct keybuf;

struct keybuf_key {
	struct rb_node		node;
	BKEY_PADDED(key);
	void			*private;
};

struct keybuf {
	struct bkey		last_scanned;
	spinlock_t		lock;

	/*
	 * Beginning and end of range in rb tree - so that we can skip taking
	 * lock and checking the rb tree when we need to check for overlapping
	 * keys.
	 */
	struct bkey		start;
	struct bkey		end;

	struct rb_root		keys;

#define KEYBUF_NR		500
	DECLARE_ARRAY_ALLOCATOR(struct keybuf_key, freelist, KEYBUF_NR);
};

struct bcache_device {
	struct closure		cl;

	struct kobject		kobj;

	struct cache_set	*c;
	unsigned int		id;
#define BCACHEDEVNAME_SIZE	12
	char			name[BCACHEDEVNAME_SIZE];

	struct gendisk		*disk;

	unsigned long		flags;
#define BCACHE_DEV_CLOSING		0
#define BCACHE_DEV_DETACHING		1
#define BCACHE_DEV_UNLINK_DONE		2
#define BCACHE_DEV_WB_RUNNING		3
#define BCACHE_DEV_RATE_DW_RUNNING	4
	int			nr_stripes;
#define BCH_MIN_STRIPE_SZ	((4 << 20) >> SECTOR_SHIFT)
	unsigned int		stripe_size;
	atomic_t		*stripe_sectors_dirty;
	unsigned long		*full_dirty_stripes;

	struct bio_set		bio_split;

	unsigned int		data_csum:1;

	int (*cache_miss)(struct btree *b, struct search *s,
			  struct bio *bio, unsigned int sectors);
	int (*ioctl)(struct bcache_device *d, blk_mode_t mode,
		     unsigned int cmd, unsigned long arg);
};

struct io {
	/* Used to track sequential IO so it can be skipped */
	struct hlist_node	hash;
	struct list_head	lru;

	unsigned long		jiffies;
	unsigned int		sequential;
	sector_t		last;
};

enum stop_on_failure {
	BCH_CACHED_DEV_STOP_AUTO = 0,
	BCH_CACHED_DEV_STOP_ALWAYS,
	BCH_CACHED_DEV_STOP_MODE_MAX,
};

struct cached_dev {
	struct list_head	list;
	struct bcache_device	disk;
	struct block_device	*bdev;
	struct file		*bdev_file;

	struct cache_sb		sb;
	struct cache_sb_disk	*sb_disk;
	struct bio		sb_bio;
	struct bio_vec		sb_bv[1];
	struct closure		sb_write;
	struct semaphore	sb_write_mutex;

	/* Refcount on the cache set. Always nonzero when we're caching. */
	refcount_t		count;
	struct work_struct	detach;

	/*
	 * Device might not be running if it's dirty and the cache set hasn't
	 * showed up yet.
	 */
	atomic_t		running;

	/*
	 * Writes take a shared lock from start to finish; scanning for dirty
	 * data to refill the rb tree requires an exclusive lock.
	 */
	struct rw_semaphore	writeback_lock;

	/*
	 * Nonzero, and writeback has a refcount (d->count), iff there is dirty
	 * data in the cache. Protected by writeback_lock; must hold a shared
	 * lock to set and an exclusive lock to clear.
	 */
	atomic_t		has_dirty;

#define BCH_CACHE_READA_ALL		0
#define BCH_CACHE_READA_META_ONLY	1
	unsigned int		cache_readahead_policy;
	struct bch_ratelimit	writeback_rate;
	struct delayed_work	writeback_rate_update;

	/* Limit number of writeback bios in flight */
	struct semaphore	in_flight;
	struct task_struct	*writeback_thread;
	struct workqueue_struct	*writeback_write_wq;

	struct keybuf		writeback_keys;

	struct task_struct	*status_update_thread;
	/*
	 * Order the write-half of writeback operations strongly in dispatch
	 * order. (Maintain LBA order; don't allow reads completing out of
	 * order to re-order the writes...)
	 */
	struct closure_waitlist	writeback_ordering_wait;
	atomic_t		writeback_sequence_next;

	/* For tracking sequential IO */
#define RECENT_IO_BITS	7
#define RECENT_IO	(1 << RECENT_IO_BITS)
	struct io		io[RECENT_IO];
	struct hlist_head	io_hash[RECENT_IO + 1];
	struct list_head	io_lru;
	spinlock_t		io_lock;

	struct cache_accounting accounting;

	/* The rest of this all shows up in sysfs */
	unsigned int		sequential_cutoff;

	unsigned int		io_disable:1;
	unsigned int		verify:1;
	unsigned int		bypass_torture_test:1;

	unsigned int		partial_stripes_expensive:1;
	unsigned int		writeback_metadata:1;
	unsigned int		writeback_running:1;
	unsigned int		writeback_consider_fragment:1;
	unsigned char		writeback_percent;
	unsigned int		writeback_delay;

	uint64_t		writeback_rate_target;
	int64_t			writeback_rate_proportional;
	int64_t			writeback_rate_integral;
	int64_t			writeback_rate_integral_scaled;
	int32_t			writeback_rate_change;

	unsigned int		writeback_rate_update_seconds;
	unsigned int		writeback_rate_i_term_inverse;
	unsigned int		writeback_rate_p_term_inverse;
	unsigned int		writeback_rate_fp_term_low;
	unsigned int		writeback_rate_fp_term_mid;
	unsigned int		writeback_rate_fp_term_high;
	unsigned int		writeback_rate_minimum;

	enum stop_on_failure	stop_when_cache_set_failed;
#define DEFAULT_CACHED_DEV_ERROR_LIMIT	64
	atomic_t		io_errors;
	unsigned int		error_limit;
	unsigned int		offline_seconds;

	/*
	 * Retry to update writeback_rate if contention happens for
	 * down_read(dc->writeback_lock) in update_writeback_rate()
	 */
#define BCH_WBRATE_UPDATE_MAX_SKIPS	15
	unsigned int		rate_update_retry;
};

enum alloc_reserve {
	RESERVE_BTREE,
	RESERVE_PRIO,
	RESERVE_MOVINGGC,
	RESERVE_NONE,
	RESERVE_NR,
};

struct cache {
	struct cache_set	*set;
	struct cache_sb		sb;
	struct cache_sb_disk	*sb_disk;
	struct bio		sb_bio;
	struct bio_vec		sb_bv[1];

	struct kobject		kobj;
	struct block_device	*bdev;
	struct file		*bdev_file;

	struct task_struct	*alloc_thread;

	struct closure		prio;
	struct prio_set		*disk_buckets;

	/*
	 * When allocating new buckets, prio_write() gets first dibs - since we
	 * may not be able to allocate at all without writing priorities and
	 * gens. prio_last_buckets[] contains the last buckets we wrote
	 * priorities to (so gc can mark them as metadata), prio_buckets[]
	 * contains the buckets allocated for the next prio write.
	 */
	uint64_t		*prio_buckets;
	uint64_t		*prio_last_buckets;

	/*
	 * free: Buckets that are ready to be used
	 *
	 * free_inc: Incoming buckets - these are buckets that currently have
	 * cached data in them, and we can't reuse them until after we write
	 * their new gen to disk. After prio_write() finishes writing the new
	 * gens/prios, they'll be moved to the free list (and possibly discarded
	 * in the process)
	 */
	DECLARE_FIFO(long, free)[RESERVE_NR];
	DECLARE_FIFO(long, free_inc);

	size_t			fifo_last_bucket;

	/* Allocation stuff: */
	struct bucket		*buckets;

	DECLARE_HEAP(struct bucket *, heap);

	/*
	 * If nonzero, we know we aren't going to find any buckets to invalidate
	 * until a gc finishes - otherwise we could pointlessly burn a ton of
	 * cpu
	 */
	unsigned int		invalidate_needs_gc;

	bool			discard; /* Get rid of? */

	struct journal_device	journal;

	/* The rest of this all shows up in sysfs */
#define IO_ERROR_SHIFT		20
	atomic_t		io_errors;
	atomic_t		io_count;

	atomic_long_t		meta_sectors_written;
	atomic_long_t		btree_sectors_written;
	atomic_long_t		sectors_written;
};

struct gc_stat {
	size_t			nodes;
	size_t			nodes_pre;
	size_t			key_bytes;

	size_t			nkeys;
	uint64_t		data;	/* sectors */
	unsigned int		in_use;	/* percent */
};

/*
 * Flag bits, for how the cache set is shutting down, and what phase it's at:
 *
 * CACHE_SET_UNREGISTERING means we're not just shutting down, we're detaching
 * all the backing devices first (their cached data gets invalidated, and they
 * won't automatically reattach).
 *
 * CACHE_SET_STOPPING always gets set first when we're closing down a cache set;
 * we'll continue to run normally for awhile with CACHE_SET_STOPPING set (i.e.
 * flushing dirty data).
 *
 * CACHE_SET_RUNNING means all cache devices have been registered and journal
 * replay is complete.
 *
 * CACHE_SET_IO_DISABLE is set when bcache is stopping the whole cache set; all
 * external and internal I/O should be denied when this flag is set.
 */
#define CACHE_SET_UNREGISTERING		0
#define CACHE_SET_STOPPING		1
#define CACHE_SET_RUNNING		2
#define CACHE_SET_IO_DISABLE		3

struct cache_set {
	struct closure		cl;

	struct list_head	list;
	struct kobject		kobj;
	struct kobject		internal;
	struct dentry		*debug;
	struct cache_accounting accounting;

	unsigned long		flags;
	atomic_t		idle_counter;
	atomic_t		at_max_writeback_rate;

	struct cache		*cache;

	struct bcache_device	**devices;
	unsigned int		devices_max_used;
	atomic_t		attached_dev_nr;
	struct list_head	cached_devs;
	uint64_t		cached_dev_sectors;
	atomic_long_t		flash_dev_dirty_sectors;
	struct closure		caching;

	struct closure		sb_write;
	struct semaphore	sb_write_mutex;

	mempool_t		search;
	mempool_t		bio_meta;
	struct bio_set		bio_split;

	/* For the btree cache */
	struct shrinker		*shrink;

	/* For the btree cache and anything allocation related */
	struct mutex		bucket_lock;

	/* log2(bucket_size), in sectors */
	unsigned short		bucket_bits;

	/* log2(block_size), in sectors */
	unsigned short		block_bits;

	/*
	 * Default number of pages for a new btree node - may be less than a
	 * full bucket
	 */
	unsigned int		btree_pages;

	/*
	 * Lists of struct btrees; lru is the list for structs that have memory
	 * allocated for actual btree node, freed is for structs that do not.
	 *
	 * We never free a struct btree, except on shutdown - we just put it on
	 * the btree_cache_freed list and reuse it later. This simplifies the
	 * code, and it doesn't cost us much memory as the memory usage is
	 * dominated by buffers that hold the actual btree node data and those
	 * can be freed - and the number of struct btrees allocated is
	 * effectively bounded.
	 *
	 * btree_cache_freeable effectively is a small cache - we use it because
	 * high order page allocations can be rather expensive, and it's quite
	 * common to delete and allocate btree nodes in quick succession. It
	 * should never grow past ~2-3 nodes in practice.
	 */
	struct list_head	btree_cache;
	struct list_head	btree_cache_freeable;
	struct list_head	btree_cache_freed;

	/* Number of elements in btree_cache + btree_cache_freeable lists */
	unsigned int		btree_cache_used;

	/*
	 * If we need to allocate memory for a new btree node and that
	 * allocation fails, we can cannibalize another node in the btree cache
	 * to satisfy the allocation - lock to guarantee only one thread does
	 * this at a time:
	 */
	wait_queue_head_t	btree_cache_wait;
	struct task_struct	*btree_cache_alloc_lock;
	spinlock_t		btree_cannibalize_lock;

	/*
	 * When we free a btree node, we increment the gen of the bucket the
	 * node is in - but we can't rewrite the prios and gens until we've
	 * finished whatever it is we were doing, otherwise after a crash the
	 * btree node would be freed but for say a split, we might not have the
	 * pointers to the new nodes inserted into the btree yet.
	 *
	 * This is a refcount that blocks prio_write() until the new keys are
	 * written.
	 */
	atomic_t		prio_blocked;
	wait_queue_head_t	bucket_wait;

	/*
	 * For any bio we don't skip we subtract the number of sectors from
	 * rescale; when it hits 0 we rescale all the bucket priorities.
	 */
	atomic_t		rescale;
	/*
	 * Used for GC: identifies whether any front-side I/O is in flight.
	 */
	atomic_t		search_inflight;
	/*
	 * When we invalidate buckets, we use both the priority and the amount
	 * of good data to determine which buckets to reuse first - to weight
	 * those together consistently we keep track of the smallest nonzero
	 * priority of any bucket.
	 */
	uint16_t		min_prio;

	/*
	 * max(gen - last_gc) for all buckets. When it gets too big we have to
	 * gc to keep gens from wrapping around.
	 */
	uint8_t			need_gc;
	struct gc_stat		gc_stats;
	size_t			nbuckets;
	size_t			avail_nbuckets;

	struct task_struct	*gc_thread;
	/* Where in the btree gc currently is */
	struct bkey		gc_done;

	/*
	 * For automatic garbage collection after writeback completes, this
	 * variable is used as a bit field:
	 * - 0000 0001b (BCH_ENABLE_AUTO_GC): enable gc after writeback
	 * - 0000 0010b (BCH_DO_AUTO_GC):     do gc after writeback
	 * This is an optimization for write requests that follow a completed
	 * writeback, where the read hit rate would otherwise drop because
	 * clean data in the cache has been discarded. Unless the user
	 * explicitly enables it via sysfs, it won't be enabled.
	 */
#define BCH_ENABLE_AUTO_GC	1
#define BCH_DO_AUTO_GC		2
	uint8_t			gc_after_writeback;

	/*
	 * The allocation code needs gc_mark in struct bucket to be correct, but
	 * it's not while a gc is in progress. Protected by bucket_lock.
	 */
	int			gc_mark_valid;

	/* Counts how many sectors bio_insert has added to the cache */
	atomic_t		sectors_to_gc;
	wait_queue_head_t	gc_wait;

	struct keybuf		moving_gc_keys;
	/* Number of moving GC bios in flight */
	struct semaphore	moving_in_flight;

	struct workqueue_struct	*moving_gc_wq;

	struct btree		*root;

#ifdef CONFIG_BCACHE_DEBUG
	struct btree		*verify_data;
	struct bset		*verify_ondisk;
	struct mutex		verify_lock;
#endif

	uint8_t			set_uuid[16];
	unsigned int		nr_uuids;
	struct uuid_entry	*uuids;
	BKEY_PADDED(uuid_bucket);
	struct closure		uuid_write;
	struct semaphore	uuid_write_mutex;

	/*
	 * A btree node on disk could have too many bsets for an iterator to fit
	 * on the stack - have to dynamically allocate them.
	 * bch_cache_set_alloc() will make sure the pool can allocate iterators
	 * with enough room to host (sb.bucket_size / sb.block_size)
	 * btree_iter_sets, which is more than the static MAX_BSETS.
	 */
	mempool_t		fill_iter;

	struct bset_sort_state	sort;

	/* List of buckets we're currently writing data to */
	struct list_head	data_buckets;
	spinlock_t		data_bucket_lock;

	struct journal		journal;

#define CONGESTED_MAX		1024
	unsigned int		congested_last_us;
	atomic_t		congested;

	/* The rest of this all shows up in sysfs */
	unsigned int		congested_read_threshold_us;
	unsigned int		congested_write_threshold_us;

	struct time_stats	btree_gc_time;
	struct time_stats	btree_split_time;
	struct time_stats	btree_read_time;

	atomic_long_t		cache_read_races;
	atomic_long_t		writeback_keys_done;
	atomic_long_t		writeback_keys_failed;

	atomic_long_t		reclaim;
	atomic_long_t		reclaimed_journal_buckets;
	atomic_long_t		flush_write;

	enum {
		ON_ERROR_UNREGISTER,
		ON_ERROR_PANIC,
	}			on_error;
#define DEFAULT_IO_ERROR_LIMIT	8
	unsigned int		error_limit;
	unsigned int		error_decay;

	unsigned short		journal_delay_ms;
	bool			expensive_debug_checks;
	unsigned int		verify:1;
	unsigned int		key_merging_disabled:1;
	unsigned int		gc_always_rewrite:1;
	unsigned int		shrinker_disabled:1;
	unsigned int		copy_gc_enabled:1;
	unsigned int		idle_max_writeback_rate_enabled:1;

#define BUCKET_HASH_BITS	12
	struct hlist_head	bucket_hash[1 << BUCKET_HASH_BITS];
};

struct bbio {
	unsigned int		submit_time_us;
	union {
		struct bkey	key;
		uint64_t	_pad[3];
		/*
		 * We only need pad = 3 here because we only ever carry around a
		 * single pointer - i.e. the pointer we're doing io to/from.
		 */
	};
	struct bio		bio;
};

#define BTREE_PRIO		USHRT_MAX
#define INITIAL_PRIO		32768U

#define btree_bytes(c)		((c)->btree_pages * PAGE_SIZE)
#define btree_blocks(b)							\
	((unsigned int) (KEY_SIZE(&b->key) >> (b)->c->block_bits))

#define btree_default_blocks(c)						\
	((unsigned int) ((PAGE_SECTORS * (c)->btree_pages) >> (c)->block_bits))

#define bucket_bytes(ca)	((ca)->sb.bucket_size << 9)
#define block_bytes(ca)		((ca)->sb.block_size << 9)

static inline unsigned int meta_bucket_pages(struct cache_sb *sb)
{
	unsigned int n, max_pages;

	max_pages = min_t(unsigned int,
			  __rounddown_pow_of_two(USHRT_MAX) / PAGE_SECTORS,
			  MAX_ORDER_NR_PAGES);

	n = sb->bucket_size / PAGE_SECTORS;
	if (n > max_pages)
		n = max_pages;

	return n;
}

static inline unsigned int meta_bucket_bytes(struct cache_sb *sb)
{
	return meta_bucket_pages(sb) << PAGE_SHIFT;
}
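
/*
 * Worked example (illustrative only, assuming 4 KiB pages, i.e. PAGE_SECTORS
 * == 8): with a 1 MB bucket, sb->bucket_size is 2048 sectors, so
 * meta_bucket_pages() returns 2048 / 8 = 256 pages and meta_bucket_bytes()
 * returns 1 MB - unless that exceeds the max_pages cap derived from USHRT_MAX
 * and MAX_ORDER_NR_PAGES, in which case the metadata bucket size is clamped
 * to the cap.
 */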

#define prios_per_bucket(ca)						\
	((meta_bucket_bytes(&(ca)->sb) - sizeof(struct prio_set)) /	\
	 sizeof(struct bucket_disk))

#define prio_buckets(ca)						\
	DIV_ROUND_UP((size_t) (ca)->sb.nbuckets, prios_per_bucket(ca))

static inline size_t sector_to_bucket(struct cache_set *c, sector_t s)
{
	return s >> c->bucket_bits;
}

static inline sector_t bucket_to_sector(struct cache_set *c, size_t b)
{
	return ((sector_t) b) << c->bucket_bits;
}

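/*
 * Illustrative example of the two helpers above: bucket_bits is
 * log2(bucket_size in sectors), so with 1 MB (2048-sector) buckets,
 * bucket_bits == 11 and sector 7000 lives in bucket 7000 >> 11 == 3, which
 * starts at sector 3 << 11 == 6144. bucket_remainder() below gives the
 * offset within the bucket, here 7000 - 6144 == 856.
 */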
static inline sector_t bucket_remainder(struct cache_set *c, sector_t s)
{
	return s & (c->cache->sb.bucket_size - 1);
}

static inline size_t PTR_BUCKET_NR(struct cache_set *c,
				   const struct bkey *k,
				   unsigned int ptr)
{
	return sector_to_bucket(c, PTR_OFFSET(k, ptr));
}

static inline struct bucket *PTR_BUCKET(struct cache_set *c,
					const struct bkey *k,
					unsigned int ptr)
{
	return c->cache->buckets + PTR_BUCKET_NR(c, k, ptr);
}

static inline uint8_t gen_after(uint8_t a, uint8_t b)
{
	uint8_t r = a - b;

	return r > 128U ? 0 : r;
}

static inline uint8_t ptr_stale(struct cache_set *c, const struct bkey *k,
				unsigned int i)
{
	return gen_after(PTR_BUCKET(c, k, i)->gen, PTR_GEN(k, i));
}

static inline bool ptr_available(struct cache_set *c, const struct bkey *k,
				 unsigned int i)
{
	return (PTR_DEV(k, i) < MAX_CACHES_PER_SET) && c->cache;
}

/* Btree key macros */

/*
 * This is used for various on disk data structures - cache_sb, prio_set, bset,
 * jset: The checksum is _always_ the first 8 bytes of these structs
 */
#define csum_set(i)							\
	bch_crc64(((void *) (i)) + sizeof(uint64_t),			\
		  ((void *) bset_bkey_last(i)) -			\
		  (((void *) (i)) + sizeof(uint64_t)))
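
/*
 * Illustrative usage sketch: metadata readers compare the stored checksum
 * against a freshly computed one before trusting a block, along the lines of
 *
 *	if (j->csum != csum_set(j))
 *		// treat the entry as invalid/unwritten
 *
 * where j points at a jset (or any of the structs listed above) whose first
 * 8 bytes hold the on-disk checksum.
 */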

/* Error handling macros */

#define btree_bug(b, ...)						\
do {									\
	if (bch_cache_set_error((b)->c, __VA_ARGS__))			\
		dump_stack();						\
} while (0)

#define cache_bug(c, ...)						\
do {									\
	if (bch_cache_set_error(c, __VA_ARGS__))			\
		dump_stack();						\
} while (0)

#define btree_bug_on(cond, b, ...)					\
do {									\
	if (cond)							\
		btree_bug(b, __VA_ARGS__);				\
} while (0)

#define cache_bug_on(cond, c, ...)					\
do {									\
	if (cond)							\
		cache_bug(c, __VA_ARGS__);				\
} while (0)

#define cache_set_err_on(cond, c, ...)					\
do {									\
	if (cond)							\
		bch_cache_set_error(c, __VA_ARGS__);			\
} while (0)

/* Looping macros */

#define for_each_bucket(b, ca)						\
	for (b = (ca)->buckets + (ca)->sb.first_bucket;			\
	     b < (ca)->buckets + (ca)->sb.nbuckets; b++)

static inline void cached_dev_put(struct cached_dev *dc)
{
	if (refcount_dec_and_test(&dc->count))
		schedule_work(&dc->detach);
}

static inline bool cached_dev_get(struct cached_dev *dc)
{
	if (!refcount_inc_not_zero(&dc->count))
		return false;

	/* Paired with the mb in cached_dev_attach */
	smp_mb__after_atomic();
	return true;
}

/*
 * bucket_gc_gen() returns the difference between the bucket's current gen and
 * the oldest gen of any pointer into that bucket in the btree (last_gc).
 */

static inline uint8_t bucket_gc_gen(struct bucket *b)
{
	return b->gen - b->last_gc;
}

#define BUCKET_GC_GEN_MAX	96U
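
/*
 * Illustrative numbers: gens are 8 bit and wrap, so the "newer than"
 * comparison in gen_after() above only works while the distance stays well
 * under 128. If a bucket's gen is 200 and the oldest pointer gen GC has seen
 * for it (last_gc) is 130, bucket_gc_gen() is 70 and the bucket can keep
 * being reused; once that distance approaches BUCKET_GC_GEN_MAX (96), garbage
 * collection has to run (rewriting btree nodes with stale pointers) before
 * the gen is bumped further, otherwise old pointers would become ambiguous
 * after wraparound.
 */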

#define kobj_attribute_write(n, fn)					\
	static struct kobj_attribute ksysfs_##n = __ATTR(n, 0200, NULL, fn)

#define kobj_attribute_rw(n, show, store)				\
	static struct kobj_attribute ksysfs_##n =			\
		__ATTR(n, 0600, show, store)

static inline void wake_up_allocators(struct cache_set *c)
{
	struct cache *ca = c->cache;

	wake_up_process(ca->alloc_thread);
}

static inline void closure_bio_submit(struct cache_set *c,
				      struct bio *bio,
				      struct closure *cl)
{
	closure_get(cl);
	if (unlikely(test_bit(CACHE_SET_IO_DISABLE, &c->flags))) {
		bio->bi_status = BLK_STS_IOERR;
		bio_endio(bio);
		return;
	}
	submit_bio_noacct(bio);
}

/*
 * Prevent the kthread from exiting directly, and make sure that when
 * kthread_stop() is called to stop a kthread, it is still alive. If a kthread
 * might be stopped by the CACHE_SET_IO_DISABLE bit being set,
 * wait_for_kthread_stop() is necessary before the kthread returns.
 */
static inline void wait_for_kthread_stop(void)
{
	while (!kthread_should_stop()) {
		set_current_state(TASK_INTERRUPTIBLE);
		schedule();
	}
}

/* Forward declarations */

void bch_count_backing_io_errors(struct cached_dev *dc, struct bio *bio);
void bch_count_io_errors(struct cache *ca, blk_status_t error,
			 int is_read, const char *m);
void bch_bbio_count_io_errors(struct cache_set *c, struct bio *bio,
			      blk_status_t error, const char *m);
void bch_bbio_endio(struct cache_set *c, struct bio *bio,
		    blk_status_t error, const char *m);
void bch_bbio_free(struct bio *bio, struct cache_set *c);
struct bio *bch_bbio_alloc(struct cache_set *c);

void __bch_submit_bbio(struct bio *bio, struct cache_set *c);
void bch_submit_bbio(struct bio *bio, struct cache_set *c,
		     struct bkey *k, unsigned int ptr);

uint8_t bch_inc_gen(struct cache *ca, struct bucket *b);
void bch_rescale_priorities(struct cache_set *c, int sectors);

bool bch_can_invalidate_bucket(struct cache *ca, struct bucket *b);
void __bch_invalidate_one_bucket(struct cache *ca, struct bucket *b);

void __bch_bucket_free(struct cache *ca, struct bucket *b);
void bch_bucket_free(struct cache_set *c, struct bkey *k);

long bch_bucket_alloc(struct cache *ca, unsigned int reserve, bool wait);
int __bch_bucket_alloc_set(struct cache_set *c, unsigned int reserve,
			   struct bkey *k, bool wait);
int bch_bucket_alloc_set(struct cache_set *c, unsigned int reserve,
			 struct bkey *k, bool wait);
bool bch_alloc_sectors(struct cache_set *c, struct bkey *k,
		       unsigned int sectors, unsigned int write_point,
		       unsigned int write_prio, bool wait);
bool bch_cached_dev_error(struct cached_dev *dc);

__printf(2, 3)
bool bch_cache_set_error(struct cache_set *c, const char *fmt, ...);

int bch_prio_write(struct cache *ca, bool wait);
void bch_write_bdev_super(struct cached_dev *dc, struct closure *parent);

extern struct workqueue_struct *bcache_wq;
extern struct workqueue_struct *bch_journal_wq;
extern struct workqueue_struct *bch_flush_wq;
extern struct mutex bch_register_lock;
extern struct list_head bch_cache_sets;

extern const struct kobj_type bch_cached_dev_ktype;
extern const struct kobj_type bch_flash_dev_ktype;
extern const struct kobj_type bch_cache_set_ktype;
extern const struct kobj_type bch_cache_set_internal_ktype;
extern const struct kobj_type bch_cache_ktype;

void bch_cached_dev_release(struct kobject *kobj);
void bch_flash_dev_release(struct kobject *kobj);
void bch_cache_set_release(struct kobject *kobj);
void bch_cache_release(struct kobject *kobj);

int bch_uuid_write(struct cache_set *c);
void bcache_write_super(struct cache_set *c);

int bch_flash_dev_create(struct cache_set *c, uint64_t size);

int bch_cached_dev_attach(struct cached_dev *dc, struct cache_set *c,
			  uint8_t *set_uuid);
void bch_cached_dev_detach(struct cached_dev *dc);
int bch_cached_dev_run(struct cached_dev *dc);
void bcache_device_stop(struct bcache_device *d);

void bch_cache_set_unregister(struct cache_set *c);
void bch_cache_set_stop(struct cache_set *c);

struct cache_set *bch_cache_set_alloc(struct cache_sb *sb);
void bch_btree_cache_free(struct cache_set *c);
int bch_btree_cache_alloc(struct cache_set *c);
void bch_moving_init_cache_set(struct cache_set *c);
int bch_open_buckets_alloc(struct cache_set *c);
void bch_open_buckets_free(struct cache_set *c);

int bch_cache_allocator_start(struct cache *ca);

void bch_debug_exit(void);
void bch_debug_init(void);
void bch_request_exit(void);
int bch_request_init(void);
void bch_btree_exit(void);
int bch_btree_init(void);

#endif /* _BCACHE_H */