Commit | Line | Data |
---|---|---|
1 | #ifndef _BCACHE_H |
2 | #define _BCACHE_H | |
3 | ||
4 | /* | |
5 | * SOME HIGH LEVEL CODE DOCUMENTATION: | |
6 | * | |
7 | * Bcache mostly works with cache sets, cache devices, and backing devices. | |
8 | * | |
9 | * Support for multiple cache devices hasn't quite been finished off yet, but | |
10 | * it's about 95% plumbed through. A cache set and its cache devices are sort of |
11 | * like an md raid array and its component devices. Most of the code doesn't care |
12 | * about individual cache devices; the main abstraction is the cache set. |
13 | * | |
14 | * Multiple cache devices is intended to give us the ability to mirror dirty | |
15 | * cached data and metadata, without mirroring clean cached data. | |
16 | * | |
17 | * Backing devices are different, in that they have a lifetime independent of a | |
18 | * cache set. When you register a newly formatted backing device it'll come up | |
19 | * in passthrough mode, and then you can attach and detach a backing device from | |
20 | * a cache set at runtime - while it's mounted and in use. Detaching implicitly | |
21 | * invalidates any cached data for that backing device. | |
22 | * | |
23 | * A cache set can have multiple (many) backing devices attached to it. | |
24 | * | |
25 | * There's also flash only volumes - this is the reason for the distinction | |
26 | * between struct cached_dev and struct bcache_device. A flash only volume | |
27 | * works much like a bcache device that has a backing device, except the | |
28 | * "cached" data is always dirty. The end result is that we get thin | |
29 | * provisioning with very little additional code. | |
30 | * | |
31 | * Flash only volumes work but they're not production ready because the moving | |
32 | * garbage collector needs more work. More on that later. | |
33 | * | |
34 | * BUCKETS/ALLOCATION: | |
35 | * | |
36 | * Bcache is primarily designed for caching, which means that in normal | |
37 | * operation all of our available space will be allocated. Thus, we need an | |
38 | * efficient way of deleting things from the cache so we can write new things to | |
39 | * it. | |
40 | * | |
41 | * To do this, we first divide the cache device up into buckets. A bucket is the | |
42 | * unit of allocation; it's typically around 1 mb - anywhere from 128k to 2M+ |
43 | * works efficiently. | |
44 | * | |
45 | * Each bucket has a 16 bit priority, and an 8 bit generation associated with | |
46 | * it. The gens and priorities for all the buckets are stored contiguously and | |
47 | * packed on disk (in a linked list of buckets - aside from the superblock, all | |
48 | * of bcache's metadata is stored in buckets). | |
49 | * | |
50 | * The priority is used to implement an LRU. We reset a bucket's priority when | |
51 | * we allocate it or on a cache hit, and every so often we decrement the priority |
52 | * of each bucket. It could be used to implement something more sophisticated, | |
53 | * if anyone ever gets around to it. | |
54 | * | |
55 | * The generation is used for invalidating buckets. Each pointer also has an 8 | |
56 | * bit generation embedded in it; for a pointer to be considered valid, its gen | |
57 | * must match the gen of the bucket it points into. Thus, to reuse a bucket all | |
58 | * we have to do is increment its gen (and write its new gen to disk; we batch | |
59 | * this up). | |
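 *
 * (Editor's illustration - not code from this file, names invented for the
 * sketch: the liveness test implied above is just a generation compare, so
 * invalidating every pointer into a bucket is a single increment of its gen.)
 *
 *	// sketch: a pointer is usable only while its embedded gen matches
 *	static inline bool example_ptr_live(const struct bucket *b, uint8_t ptr_gen)
 *	{
 *		return b->gen == ptr_gen;
 *	}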
60 | * | |
61 | * Bcache is entirely COW - we never write twice to a bucket, even buckets that | |
62 | * contain metadata (including btree nodes). | |
63 | * | |
64 | * THE BTREE: | |
65 | * | |
66 | * Bcache is in large part designed around the btree. |
67 | * | |
68 | * At a high level, the btree is just an index of key -> ptr tuples. | |
69 | * | |
70 | * Keys represent extents, and thus have a size field. Keys also have a variable | |
71 | * number of pointers attached to them (potentially zero, which is handy for | |
72 | * invalidating the cache). | |
73 | * | |
74 | * The key itself is an inode:offset pair. The inode number corresponds to a | |
75 | * backing device or a flash only volume. The offset is the ending offset of the | |
76 | * extent within the inode - not the starting offset; this makes lookups | |
77 | * slightly more convenient. | |
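 *
 * (Editor's worked example: an extent of 20 sectors starting at sector 100 of
 * inode 5 is therefore indexed under offset 120 with size 20, so its start is
 * offset - size = 100; a lookup for sector 100 can stop at the first key whose
 * offset is strictly greater than 100.)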
78 | * | |
79 | * Pointers contain the cache device id, the offset on that device, and an 8 bit | |
80 | * generation number. More on the gen later. | |
81 | * | |
82 | * Index lookups are not fully abstracted - cache lookups in particular are | |
83 | * still somewhat mixed in with the btree code, but things are headed in that | |
84 | * direction. | |
85 | * | |
86 | * Updates are fairly well abstracted, though. There are two different ways of | |
87 | * updating the btree; insert and replace. | |
88 | * | |
89 | * BTREE_INSERT will just take a list of keys and insert them into the btree - | |
90 | * overwriting (possibly only partially) any extents they overlap with. This is | |
91 | * used to update the index after a write. | |
92 | * | |
93 | * BTREE_REPLACE is really cmpxchg(); it inserts a key into the btree iff it is | |
94 | * overwriting a key that matches another given key. This is used for inserting | |
95 | * data into the cache after a cache miss, and for background writeback, and for | |
96 | * the moving garbage collector. | |
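 *
 * (Editor's sketch of the cmpxchg-style semantics described above - pseudocode
 * only, the helper names are invented:)
 *
 *	// BTREE_REPLACE, roughly: only insert @new where the index still maps
 *	// the extent to @old, so a cache-miss fill, writeback or a copy made by
 *	// the moving gc can never clobber data written in the meantime.
 *	if (index_still_contains(b, old))
 *		do_insert(b, new);
 *	else
 *		drop_insert(new);	// somebody raced with us; keep their data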
97 | * | |
98 | * There is no "delete" operation; deleting things from the index is | |
99 | * accomplished either by invalidating pointers (by incrementing a bucket's |
100 | * gen) or by inserting a key with 0 pointers - which will overwrite anything | |
101 | * previously present at that location in the index. | |
102 | * | |
103 | * This means that there are always stale/invalid keys in the btree. They're | |
104 | * filtered out by the code that iterates through a btree node, and removed when | |
105 | * a btree node is rewritten. | |
106 | * | |
107 | * BTREE NODES: | |
108 | * | |
109 | * Our unit of allocation is a bucket, and we can't arbitrarily allocate and |
110 | * free smaller than a bucket - so, that's how big our btree nodes are. | |
111 | * | |
112 | * (If buckets are really big we'll only use part of the bucket for a btree node | |
113 | * - no less than 1/4th - but a bucket still contains no more than a single | |
114 | * btree node. I'd actually like to change this, but for now we rely on the | |
115 | * bucket's gen for deleting btree nodes when we rewrite/split a node.) | |
116 | * | |
117 | * Anyways, btree nodes are big - big enough to be inefficient with a textbook | |
118 | * btree implementation. | |
119 | * | |
120 | * The way this is solved is that btree nodes are internally log structured; we | |
121 | * can append new keys to an existing btree node without rewriting it. This | |
122 | * means each set of keys we write is sorted, but the node is not. | |
123 | * | |
124 | * We maintain this log structure in memory - keeping 1Mb of keys sorted would | |
125 | * be expensive, and we have to distinguish between the keys we have written and | |
126 | * the keys we haven't. So to do a lookup in a btree node, we have to search | |
127 | * each sorted set. But we do merge written sets together lazily, so the cost of | |
128 | * these extra searches is quite low (normally most of the keys in a btree node | |
129 | * will be in one big set, and then there'll be one or two sets that are much | |
130 | * smaller). | |
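 *
 * (Editor's sketch: a lookup is therefore one binary search per sorted set
 * rather than one per node - loop and helper names below are invented:)
 *
 *	for (i = 0; i < nr_sets; i++) {
 *		k = bset_bsearch(&sets[i], search_key);	// each set is sorted
 *		best = closer_key(best, k, search_key);	// keep the best candidate
 *	}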
131 | * | |
132 | * This log structure makes bcache's btree more of a hybrid between a | |
133 | * conventional btree and a compacting data structure, with some of the | |
134 | * advantages of both. | |
135 | * | |
136 | * GARBAGE COLLECTION: | |
137 | * | |
138 | * We can't just invalidate any bucket - it might contain dirty data or | |
139 | * metadata. If it once contained dirty data, other writes might overwrite it | |
140 | * later, leaving no valid pointers into that bucket in the index. | |
141 | * | |
142 | * Thus, the primary purpose of garbage collection is to find buckets to reuse. | |
143 | * It also counts how much valid data each bucket currently contains, so that |
144 | * allocation can reuse buckets sooner when they've been mostly overwritten. | |
145 | * | |
146 | * It also does some things that are really internal to the btree | |
147 | * implementation. If a btree node contains pointers that are stale by more than | |
148 | * some threshold, it rewrites the btree node to avoid the bucket's generation | |
149 | * wrapping around. It also merges adjacent btree nodes if they're empty enough. | |
150 | * | |
151 | * THE JOURNAL: | |
152 | * | |
153 | * Bcache's journal is not necessary for consistency; we always strictly | |
154 | * order metadata writes so that the btree and everything else is consistent on | |
155 | * disk in the event of an unclean shutdown, and in fact bcache had writeback | |
156 | * caching (with recovery from unclean shutdown) before journalling was | |
157 | * implemented. | |
158 | * | |
159 | * Rather, the journal is purely a performance optimization; we can't complete a | |
160 | * write until we've updated the index on disk, otherwise the cache would be | |
161 | * inconsistent in the event of an unclean shutdown. This means that without the | |
162 | * journal, on random write workloads we constantly have to update all the leaf | |
163 | * nodes in the btree, and those writes will be mostly empty (appending at most | |
164 | * a few keys each) - highly inefficient in terms of the amount of metadata written, |
165 | * and it puts more strain on the various btree resorting/compacting code. | |
166 | * | |
167 | * The journal is just a log of keys we've inserted; on startup we just reinsert | |
168 | * all the keys in the open journal entries. That means that when we're updating | |
169 | * a node in the btree, we can wait until a 4k block of keys fills up before | |
170 | * writing them out. | |
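 *
 * (Editor's note, illustrative only: the resulting ordering is roughly
 *
 *	data write -> journal entry containing the new keys -> complete the
 *	write -> leaf node updated in memory, flushed later in bigger batches
 *
 * so the on-disk btree can lag behind, and replay just reinserts whatever is
 * still sitting in open journal entries.)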
171 | * | |
172 | * For simplicity, we only journal updates to leaf nodes; updates to parent | |
173 | * nodes are rare enough (since our leaf nodes are huge) that it wasn't worth | |
174 | * the complexity to deal with journalling them (in particular, journal replay) | |
175 | * - updates to non leaf nodes just happen synchronously (see btree_split()). | |
176 | */ | |
177 | ||
178 | #define pr_fmt(fmt) "bcache: %s() " fmt "\n", __func__ | |
179 | ||
81ab4190 | 180 | #include <linux/bcache.h> |
cafe5635 | 181 | #include <linux/bio.h> |
182 | #include <linux/kobject.h> |
183 | #include <linux/list.h> | |
184 | #include <linux/mutex.h> | |
185 | #include <linux/rbtree.h> | |
186 | #include <linux/rwsem.h> | |
187 | #include <linux/types.h> | |
188 | #include <linux/workqueue.h> | |
189 | ||
190 | #include "util.h" | |
191 | #include "closure.h" | |
192 | ||
193 | struct bucket { | |
194 | atomic_t pin; | |
195 | uint16_t prio; | |
196 | uint8_t gen; | |
197 | uint8_t disk_gen; | |
198 | uint8_t last_gc; /* Most out of date gen in the btree */ | |
199 | uint8_t gc_gen; | |
200 | uint16_t gc_mark; | |
201 | }; | |
202 | ||
203 | /* | |
204 | * I'd use bitfields for these, but I don't trust the compiler not to screw me | |
205 | * as multiple threads touch struct bucket without locking | |
206 | */ | |
207 | ||
208 | BITMASK(GC_MARK, struct bucket, gc_mark, 0, 2); | |
209 | #define GC_MARK_RECLAIMABLE 0 | |
210 | #define GC_MARK_DIRTY 1 | |
211 | #define GC_MARK_METADATA 2 | |
212 | BITMASK(GC_SECTORS_USED, struct bucket, gc_mark, 2, 14); | |
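
/*
 * (Editor's illustration: the two BITMASK()s above pack a 2 bit mark and a
 * 14 bit sector count into the single 16 bit gc_mark word. Assuming the macro
 * generates the usual FIELD()/SET_FIELD() accessor pair, marking a metadata
 * bucket during gc would look roughly like:
 *
 *	SET_GC_MARK(b, GC_MARK_METADATA);
 *	SET_GC_SECTORS_USED(b, min_t(unsigned, used, (1U << 14) - 1));
 *
 * This is a sketch, not a quote from the gc code.)
 */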
213 | ||
214 | #include "journal.h" |
215 | #include "stats.h" | |
216 | struct search; | |
217 | struct btree; | |
218 | struct keybuf; | |
219 | ||
220 | struct keybuf_key { | |
221 | struct rb_node node; | |
222 | BKEY_PADDED(key); | |
223 | void *private; | |
224 | }; | |
225 | ||
cafe5635 | 226 | struct keybuf { |
227 | struct bkey last_scanned; |
228 | spinlock_t lock; | |
229 | ||
230 | /* | |
231 | * Beginning and end of range in rb tree - so that we can skip taking | |
232 | * lock and checking the rb tree when we need to check for overlapping | |
233 | * keys. | |
234 | */ | |
235 | struct bkey start; | |
236 | struct bkey end; | |
237 | ||
238 | struct rb_root keys; | |
239 | ||
48a915a8 | 240 | #define KEYBUF_NR 500 |
241 | DECLARE_ARRAY_ALLOCATOR(struct keybuf_key, freelist, KEYBUF_NR); |
242 | }; | |
243 | ||
244 | struct bio_split_pool { | |
245 | struct bio_set *bio_split; | |
246 | mempool_t *bio_split_hook; | |
247 | }; | |
248 | ||
249 | struct bio_split_hook { | |
250 | struct closure cl; | |
251 | struct bio_split_pool *p; | |
252 | struct bio *bio; | |
253 | bio_end_io_t *bi_end_io; | |
254 | void *bi_private; | |
255 | }; | |
256 | ||
257 | struct bcache_device { | |
258 | struct closure cl; | |
259 | ||
260 | struct kobject kobj; | |
261 | ||
262 | struct cache_set *c; | |
263 | unsigned id; | |
264 | #define BCACHEDEVNAME_SIZE 12 | |
265 | char name[BCACHEDEVNAME_SIZE]; | |
266 | ||
267 | struct gendisk *disk; | |
268 | ||
269 | unsigned long flags; |
270 | #define BCACHE_DEV_CLOSING 0 | |
271 | #define BCACHE_DEV_DETACHING 1 | |
272 | #define BCACHE_DEV_UNLINK_DONE 2 | |
cafe5635 | 273 | |
48a915a8 | 274 | unsigned nr_stripes; |
2d679fc7 | 275 | unsigned stripe_size; |
279afbad | 276 | atomic_t *stripe_sectors_dirty; |
48a915a8 | 277 | unsigned long *full_dirty_stripes; |
279afbad | 278 | |
279 | unsigned long sectors_dirty_last; |
280 | long sectors_dirty_derivative; | |
281 | ||
282 | struct bio_set *bio_split; |
283 | ||
284 | unsigned data_csum:1; | |
285 | ||
286 | int (*cache_miss)(struct btree *, struct search *, | |
287 | struct bio *, unsigned); | |
288 | int (*ioctl) (struct bcache_device *, fmode_t, unsigned, unsigned long); | |
289 | ||
290 | struct bio_split_pool bio_split_hook; | |
291 | }; | |
292 | ||
293 | struct io { | |
294 | /* Used to track sequential IO so it can be skipped */ | |
295 | struct hlist_node hash; | |
296 | struct list_head lru; | |
297 | ||
298 | unsigned long jiffies; | |
299 | unsigned sequential; | |
300 | sector_t last; | |
301 | }; | |
302 | ||
303 | struct cached_dev { | |
304 | struct list_head list; | |
305 | struct bcache_device disk; | |
306 | struct block_device *bdev; | |
307 | ||
308 | struct cache_sb sb; | |
309 | struct bio sb_bio; | |
310 | struct bio_vec sb_bv[1]; | |
311 | struct closure_with_waitlist sb_write; | |
312 | ||
313 | /* Refcount on the cache set. Always nonzero when we're caching. */ | |
314 | atomic_t count; | |
315 | struct work_struct detach; | |
316 | ||
317 | /* | |
318 | * Device might not be running if it's dirty and the cache set hasn't | |
319 | * shown up yet. |
320 | */ | |
321 | atomic_t running; | |
322 | ||
323 | /* | |
324 | * Writes take a shared lock from start to finish; scanning for dirty | |
325 | * data to refill the rb tree requires an exclusive lock. | |
326 | */ | |
327 | struct rw_semaphore writeback_lock; | |
328 | ||
329 | /* | |
330 | * Nonzero, and writeback has a refcount (d->count), iff there is dirty | |
331 | * data in the cache. Protected by writeback_lock; must have a |
332 | * shared lock to set and exclusive lock to clear. | |
333 | */ | |
334 | atomic_t has_dirty; | |
335 | ||
c2a4f318 | 336 | struct bch_ratelimit writeback_rate; |
337 | struct delayed_work writeback_rate_update; |
338 | ||
339 | /* | |
340 | * Internal to the writeback code, so read_dirty() can keep track of | |
341 | * where it's at. | |
342 | */ | |
343 | sector_t last_read; | |
344 | ||
345 | /* Limit number of writeback bios in flight */ |
346 | struct semaphore in_flight; | |
5e6926da | 347 | struct task_struct *writeback_thread; |
348 | |
349 | struct keybuf writeback_keys; | |
350 | ||
351 | /* For tracking sequential IO */ | |
352 | #define RECENT_IO_BITS 7 | |
353 | #define RECENT_IO (1 << RECENT_IO_BITS) | |
354 | struct io io[RECENT_IO]; | |
355 | struct hlist_head io_hash[RECENT_IO + 1]; | |
356 | struct list_head io_lru; | |
357 | spinlock_t io_lock; | |
358 | ||
359 | struct cache_accounting accounting; | |
360 | ||
361 | /* The rest of this all shows up in sysfs */ | |
362 | unsigned sequential_cutoff; | |
363 | unsigned readahead; | |
364 | ||
cafe5635 | 365 | unsigned verify:1; |
5ceaaad7 | 366 | unsigned bypass_torture_test:1; |
cafe5635 | 367 | |
72c27061 | 368 | unsigned partial_stripes_expensive:1; |
369 | unsigned writeback_metadata:1; |
370 | unsigned writeback_running:1; | |
371 | unsigned char writeback_percent; | |
372 | unsigned writeback_delay; | |
373 | ||
374 | int writeback_rate_change; | |
375 | int64_t writeback_rate_derivative; | |
376 | uint64_t writeback_rate_target; | |
377 | ||
378 | unsigned writeback_rate_update_seconds; | |
379 | unsigned writeback_rate_d_term; | |
380 | unsigned writeback_rate_p_term_inverse; | |
381 | unsigned writeback_rate_d_smooth; | |
382 | }; | |
383 | ||
384 | enum alloc_watermarks { | |
385 | WATERMARK_PRIO, | |
386 | WATERMARK_METADATA, | |
387 | WATERMARK_MOVINGGC, | |
388 | WATERMARK_NONE, | |
389 | WATERMARK_MAX | |
390 | }; | |
391 | ||
392 | struct cache { | |
393 | struct cache_set *set; | |
394 | struct cache_sb sb; | |
395 | struct bio sb_bio; | |
396 | struct bio_vec sb_bv[1]; | |
397 | ||
398 | struct kobject kobj; | |
399 | struct block_device *bdev; | |
400 | ||
401 | unsigned watermark[WATERMARK_MAX]; | |
402 | ||
119ba0f8 | 403 | struct task_struct *alloc_thread; |
404 | |
405 | struct closure prio; | |
406 | struct prio_set *disk_buckets; | |
407 | ||
408 | /* | |
409 | * When allocating new buckets, prio_write() gets first dibs - since we | |
410 | * may not be able to allocate at all without writing priorities and gens. |
411 | * prio_buckets[] contains the last buckets we wrote priorities to (so | |
412 | * gc can mark them as metadata), prio_next[] contains the buckets | |
413 | * allocated for the next prio write. | |
414 | */ | |
415 | uint64_t *prio_buckets; | |
416 | uint64_t *prio_last_buckets; | |
417 | ||
418 | /* | |
419 | * free: Buckets that are ready to be used | |
420 | * | |
421 | * free_inc: Incoming buckets - these are buckets that currently have | |
422 | * cached data in them, and we can't reuse them until after we write | |
423 | * their new gen to disk. After prio_write() finishes writing the new | |
424 | * gens/prios, they'll be moved to the free list (and possibly discarded | |
425 | * in the process) | |
426 | * | |
427 | * unused: GC found nothing pointing into these buckets (possibly | |
428 | * because all the data they contained was overwritten), so we only | |
429 | * need to discard them before they can be moved to the free list. | |
430 | */ | |
431 | DECLARE_FIFO(long, free); | |
432 | DECLARE_FIFO(long, free_inc); | |
433 | DECLARE_FIFO(long, unused); | |
434 | ||
435 | size_t fifo_last_bucket; | |
436 | ||
437 | /* Allocation stuff: */ | |
438 | struct bucket *buckets; | |
439 | ||
440 | DECLARE_HEAP(struct bucket *, heap); | |
441 | ||
442 | /* | |
443 | * max(gen - disk_gen) for all buckets. When it gets too big we have to | |
444 | * call prio_write() to keep gens from wrapping. | |
445 | */ | |
446 | uint8_t need_save_prio; | |
447 | unsigned gc_move_threshold; | |
448 | ||
449 | /* | |
450 | * If nonzero, we know we aren't going to find any buckets to invalidate | |
451 | * until a gc finishes - otherwise we could pointlessly burn a ton of | |
452 | * cpu | |
453 | */ | |
454 | unsigned invalidate_needs_gc:1; | |
455 | ||
456 | bool discard; /* Get rid of? */ | |
457 | ||
458 | struct journal_device journal; |
459 | ||
460 | /* The rest of this all shows up in sysfs */ | |
461 | #define IO_ERROR_SHIFT 20 | |
462 | atomic_t io_errors; | |
463 | atomic_t io_count; | |
464 | ||
465 | atomic_long_t meta_sectors_written; | |
466 | atomic_long_t btree_sectors_written; | |
467 | atomic_long_t sectors_written; | |
468 | ||
469 | struct bio_split_pool bio_split_hook; | |
470 | }; | |
471 | ||
472 | struct gc_stat { | |
473 | size_t nodes; | |
474 | size_t key_bytes; | |
475 | ||
476 | size_t nkeys; | |
477 | uint64_t data; /* sectors */ | |
478 | unsigned in_use; /* percent */ |
479 | }; | |
480 | ||
481 | /* | |
482 | * Flag bits, for how the cache set is shutting down, and what phase it's at: | |
483 | * | |
484 | * CACHE_SET_UNREGISTERING means we're not just shutting down, we're detaching | |
485 | * all the backing devices first (their cached data gets invalidated, and they | |
486 | * won't automatically reattach). | |
487 | * | |
488 | * CACHE_SET_STOPPING always gets set first when we're closing down a cache set; | |
489 | * we'll continue to run normally for a while with CACHE_SET_STOPPING set (i.e. |
490 | * flushing dirty data). | |
491 | */ |
492 | #define CACHE_SET_UNREGISTERING 0 | |
493 | #define CACHE_SET_STOPPING 1 | |
494 | |
495 | struct cache_set { | |
496 | struct closure cl; | |
497 | ||
498 | struct list_head list; | |
499 | struct kobject kobj; | |
500 | struct kobject internal; | |
501 | struct dentry *debug; | |
502 | struct cache_accounting accounting; | |
503 | ||
504 | unsigned long flags; | |
505 | ||
506 | struct cache_sb sb; | |
507 | ||
508 | struct cache *cache[MAX_CACHES_PER_SET]; | |
509 | struct cache *cache_by_alloc[MAX_CACHES_PER_SET]; | |
510 | int caches_loaded; | |
511 | ||
512 | struct bcache_device **devices; | |
513 | struct list_head cached_devs; | |
514 | uint64_t cached_dev_sectors; | |
515 | struct closure caching; | |
516 | ||
517 | struct closure_with_waitlist sb_write; | |
518 | ||
519 | mempool_t *search; | |
520 | mempool_t *bio_meta; | |
521 | struct bio_set *bio_split; | |
522 | ||
523 | /* For the btree cache */ | |
524 | struct shrinker shrink; | |
525 | ||
526 | /* For the btree cache and anything allocation related */ |
527 | struct mutex bucket_lock; | |
528 | ||
529 | /* log2(bucket_size), in sectors */ | |
530 | unsigned short bucket_bits; | |
531 | ||
532 | /* log2(block_size), in sectors */ | |
533 | unsigned short block_bits; | |
534 | ||
535 | /* | |
536 | * Default number of pages for a new btree node - may be less than a | |
537 | * full bucket | |
538 | */ | |
539 | unsigned btree_pages; | |
540 | ||
541 | /* | |
542 | * Lists of struct btrees; lru is the list for structs that have memory | |
543 | * allocated for actual btree node, freed is for structs that do not. | |
544 | * | |
545 | * We never free a struct btree, except on shutdown - we just put it on | |
546 | * the btree_cache_freed list and reuse it later. This simplifies the | |
547 | * code, and it doesn't cost us much memory as the memory usage is | |
548 | * dominated by buffers that hold the actual btree node data and those | |
549 | * can be freed - and the number of struct btrees allocated is | |
550 | * effectively bounded. | |
551 | * | |
552 | * btree_cache_freeable effectively is a small cache - we use it because | |
553 | * high order page allocations can be rather expensive, and it's quite | |
554 | * common to delete and allocate btree nodes in quick succession. It | |
555 | * should never grow past ~2-3 nodes in practice. | |
556 | */ | |
557 | struct list_head btree_cache; | |
558 | struct list_head btree_cache_freeable; | |
559 | struct list_head btree_cache_freed; | |
560 | ||
561 | /* Number of elements in btree_cache + btree_cache_freeable lists */ | |
562 | unsigned bucket_cache_used; | |
563 | ||
564 | /* | |
565 | * If we need to allocate memory for a new btree node and that | |
566 | * allocation fails, we can cannibalize another node in the btree cache | |
567 | * to satisfy the allocation. However, only one thread can be doing this | |
568 | * at a time, for obvious reasons - try_harder and try_wait are | |
569 | * basically a lock for this that we can wait on asynchronously. The | |
570 | * btree_root() macro releases the lock when it returns. | |
571 | */ | |
572 | struct task_struct *try_harder; |
573 | wait_queue_head_t try_wait; | |
574 | uint64_t try_harder_start; |
575 | ||
576 | /* | |
577 | * When we free a btree node, we increment the gen of the bucket the | |
578 | * node is in - but we can't rewrite the prios and gens until we've |
579 | * finished whatever it is we were doing; otherwise, after a crash the |
580 | * btree node would be freed but - for, say, a split - we might not yet |
581 | * have the pointers to the new nodes inserted into the btree. |
582 | * | |
583 | * This is a refcount that blocks prio_write() until the new keys are | |
584 | * written. | |
585 | */ | |
586 | atomic_t prio_blocked; | |
35fcd848 | 587 | wait_queue_head_t bucket_wait; |
588 | |
589 | /* | |
590 | * For any bio we don't skip we subtract the number of sectors from | |
591 | * rescale; when it hits 0 we rescale all the bucket priorities. | |
592 | */ | |
593 | atomic_t rescale; | |
594 | /* | |
595 | * When we invalidate buckets, we use both the priority and the amount | |
596 | * of good data to determine which buckets to reuse first - to weight | |
597 | * those together consistently we keep track of the smallest nonzero | |
598 | * priority of any bucket. | |
599 | */ | |
600 | uint16_t min_prio; | |
601 | ||
602 | /* | |
603 | * max(gen - gc_gen) for all buckets. When it gets too big we have to gc | |
604 | * to keep gens from wrapping around. | |
605 | */ | |
606 | uint8_t need_gc; | |
607 | struct gc_stat gc_stats; | |
608 | size_t nbuckets; | |
609 | ||
72a44517 | 610 | struct task_struct *gc_thread; |
611 | /* Where in the btree gc currently is */ |
612 | struct bkey gc_done; | |
613 | ||
614 | /* | |
615 | * The allocation code needs gc_mark in struct bucket to be correct, but | |
616 | * it's not while a gc is in progress. Protected by bucket_lock. | |
617 | */ | |
618 | int gc_mark_valid; | |
619 | ||
620 | /* Counts how many sectors bio_insert has added to the cache */ | |
621 | atomic_t sectors_to_gc; | |
622 | ||
72a44517 | 623 | wait_queue_head_t moving_gc_wait; |
624 | struct keybuf moving_gc_keys; |
625 | /* Number of moving GC bios in flight */ | |
72a44517 | 626 | struct semaphore moving_in_flight; |
627 | |
628 | struct btree *root; | |
629 | ||
630 | #ifdef CONFIG_BCACHE_DEBUG | |
631 | struct btree *verify_data; | |
632 | struct mutex verify_lock; | |
633 | #endif | |
634 | ||
635 | unsigned nr_uuids; | |
636 | struct uuid_entry *uuids; | |
637 | BKEY_PADDED(uuid_bucket); | |
638 | struct closure_with_waitlist uuid_write; | |
639 | ||
640 | /* | |
641 | * A btree node on disk could have too many bsets for an iterator to fit | |
57943511 | 642 | * on the stack - have to dynamically allocate them |
cafe5635 | 643 | */ |
57943511 | 644 | mempool_t *fill_iter; |
645 | |
646 | /* | |
647 | * btree_sort() is a merge sort and requires temporary space - single | |
648 | * element mempool | |
649 | */ | |
650 | struct mutex sort_lock; | |
651 | struct bset *sort; | |
6ded34d1 | 652 | unsigned sort_crit_factor; |
653 | |
654 | /* List of buckets we're currently writing data to */ | |
655 | struct list_head data_buckets; | |
656 | spinlock_t data_bucket_lock; | |
657 | ||
658 | struct journal journal; | |
659 | ||
660 | #define CONGESTED_MAX 1024 | |
661 | unsigned congested_last_us; | |
662 | atomic_t congested; | |
663 | ||
664 | /* The rest of this all shows up in sysfs */ | |
665 | unsigned congested_read_threshold_us; | |
666 | unsigned congested_write_threshold_us; | |
667 | ||
668 | struct time_stats sort_time; |
669 | struct time_stats btree_gc_time; | |
670 | struct time_stats btree_split_time; | |
671 | struct time_stats btree_read_time; |
672 | struct time_stats try_harder_time; | |
673 | ||
674 | atomic_long_t cache_read_races; | |
675 | atomic_long_t writeback_keys_done; | |
676 | atomic_long_t writeback_keys_failed; | |
677 | |
678 | enum { | |
679 | ON_ERROR_UNREGISTER, | |
680 | ON_ERROR_PANIC, | |
681 | } on_error; | |
682 | unsigned error_limit; |
683 | unsigned error_decay; | |
77c320eb | 684 | |
685 | unsigned short journal_delay_ms; |
686 | unsigned verify:1; | |
687 | unsigned key_merging_disabled:1; | |
280481d0 | 688 | unsigned expensive_debug_checks:1; |
689 | unsigned gc_always_rewrite:1; |
690 | unsigned shrinker_disabled:1; | |
691 | unsigned copy_gc_enabled:1; | |
692 | ||
693 | #define BUCKET_HASH_BITS 12 | |
694 | struct hlist_head bucket_hash[1 << BUCKET_HASH_BITS]; | |
695 | }; | |
696 | ||
697 | struct bbio { |
698 | unsigned submit_time_us; | |
699 | union { | |
700 | struct bkey key; | |
701 | uint64_t _pad[3]; | |
702 | /* | |
703 | * We only need pad = 3 here because we only ever carry around a | |
704 | * single pointer - i.e. the pointer we're doing io to/from. | |
705 | */ | |
706 | }; | |
707 | struct bio bio; | |
708 | }; | |
709 | ||
710 | static inline unsigned local_clock_us(void) | |
711 | { | |
712 | return local_clock() >> 10; | |
713 | } | |
714 | ||
715 | #define BTREE_PRIO USHRT_MAX |
716 | #define INITIAL_PRIO 32768 | |
717 | ||
718 | #define btree_bytes(c) ((c)->btree_pages * PAGE_SIZE) | |
719 | #define btree_blocks(b) \ | |
720 | ((unsigned) (KEY_SIZE(&b->key) >> (b)->c->block_bits)) | |
721 | ||
722 | #define btree_default_blocks(c) \ | |
723 | ((unsigned) ((PAGE_SECTORS * (c)->btree_pages) >> (c)->block_bits)) | |
724 | ||
725 | #define bucket_pages(c) ((c)->sb.bucket_size / PAGE_SECTORS) | |
726 | #define bucket_bytes(c) ((c)->sb.bucket_size << 9) | |
727 | #define block_bytes(c) ((c)->sb.block_size << 9) | |
728 | ||
729 | #define __set_bytes(i, k) (sizeof(*(i)) + (k) * sizeof(uint64_t)) | |
730 | #define set_bytes(i) __set_bytes(i, i->keys) | |
731 | ||
732 | #define __set_blocks(i, k, c) DIV_ROUND_UP(__set_bytes(i, k), block_bytes(c)) | |
733 | #define set_blocks(i, c) __set_blocks(i, (i)->keys, c) | |
734 | ||
735 | #define node(i, j) ((struct bkey *) ((i)->d + (j))) | |
736 | #define end(i) node(i, (i)->keys) | |
737 | ||
738 | #define index(i, b) \ | |
739 | ((size_t) (((void *) i - (void *) (b)->sets[0].data) / \ | |
740 | block_bytes(b->c))) | |
741 | ||
742 | #define btree_data_space(b) (PAGE_SIZE << (b)->page_order) | |
743 | ||
744 | #define prios_per_bucket(c) \ | |
745 | ((bucket_bytes(c) - sizeof(struct prio_set)) / \ | |
746 | sizeof(struct bucket_disk)) | |
747 | #define prio_buckets(c) \ | |
748 | DIV_ROUND_UP((size_t) (c)->sb.nbuckets, prios_per_bucket(c)) | |
749 | ||
750 | static inline size_t sector_to_bucket(struct cache_set *c, sector_t s) |
751 | { | |
752 | return s >> c->bucket_bits; | |
753 | } | |
754 | ||
755 | static inline sector_t bucket_to_sector(struct cache_set *c, size_t b) | |
756 | { | |
757 | return ((sector_t) b) << c->bucket_bits; | |
758 | } | |
759 | ||
760 | static inline sector_t bucket_remainder(struct cache_set *c, sector_t s) | |
761 | { | |
762 | return s & (c->sb.bucket_size - 1); | |
763 | } | |
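
/*
 * (Editor's worked example for the helpers above: with 1 MB buckets,
 * sb.bucket_size is 2048 sectors and bucket_bits is 11, so sector 1000000
 * maps to bucket 1000000 >> 11 = 488, at offset 1000000 & 2047 = 576 within
 * that bucket.)
 */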
764 | ||
765 | static inline struct cache *PTR_CACHE(struct cache_set *c, | |
766 | const struct bkey *k, | |
767 | unsigned ptr) | |
768 | { | |
769 | return c->cache[PTR_DEV(k, ptr)]; | |
770 | } | |
771 | ||
772 | static inline size_t PTR_BUCKET_NR(struct cache_set *c, | |
773 | const struct bkey *k, | |
774 | unsigned ptr) | |
775 | { | |
776 | return sector_to_bucket(c, PTR_OFFSET(k, ptr)); | |
777 | } | |
778 | ||
779 | static inline struct bucket *PTR_BUCKET(struct cache_set *c, | |
780 | const struct bkey *k, | |
781 | unsigned ptr) | |
782 | { | |
783 | return PTR_CACHE(c, k, ptr)->buckets + PTR_BUCKET_NR(c, k, ptr); | |
784 | } | |
785 | ||
786 | /* Btree key macros */ | |
787 | ||
788 | static inline void bkey_init(struct bkey *k) |
789 | { | |
81ab4190 | 790 | *k = ZERO_KEY; |
791 | } |
792 | ||
793 | /* |
794 | * This is used for various on disk data structures - cache_sb, prio_set, bset, | |
795 | * jset: The checksum is _always_ the first 8 bytes of these structs | |
796 | */ | |
797 | #define csum_set(i) \ | |
169ef1cf | 798 | bch_crc64(((void *) (i)) + sizeof(uint64_t), \ |
799 | ((void *) end(i)) - (((void *) (i)) + sizeof(uint64_t))) |
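
/*
 * (Editor's illustration of the convention above - since the stored checksum
 * occupies the first 8 bytes, verifying one of these structs is roughly:
 *
 *	if ((i)->csum != csum_set(i))
 *		goto bad_checksum;	// label invented for the sketch
 *
 * assuming the struct's csum field is that leading uint64_t.)
 */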
800 | ||
801 | /* Error handling macros */ | |
802 | ||
803 | #define btree_bug(b, ...) \ | |
804 | do { \ | |
805 | if (bch_cache_set_error((b)->c, __VA_ARGS__)) \ | |
806 | dump_stack(); \ | |
807 | } while (0) | |
808 | ||
809 | #define cache_bug(c, ...) \ | |
810 | do { \ | |
811 | if (bch_cache_set_error(c, __VA_ARGS__)) \ | |
812 | dump_stack(); \ | |
813 | } while (0) | |
814 | ||
815 | #define btree_bug_on(cond, b, ...) \ | |
816 | do { \ | |
817 | if (cond) \ | |
818 | btree_bug(b, __VA_ARGS__); \ | |
819 | } while (0) | |
820 | ||
821 | #define cache_bug_on(cond, c, ...) \ | |
822 | do { \ | |
823 | if (cond) \ | |
824 | cache_bug(c, __VA_ARGS__); \ | |
825 | } while (0) | |
826 | ||
827 | #define cache_set_err_on(cond, c, ...) \ | |
828 | do { \ | |
829 | if (cond) \ | |
830 | bch_cache_set_error(c, __VA_ARGS__); \ | |
831 | } while (0) | |
832 | ||
833 | /* Looping macros */ | |
834 | ||
835 | #define for_each_cache(ca, cs, iter) \ | |
836 | for (iter = 0; ca = cs->cache[iter], iter < (cs)->sb.nr_in_set; iter++) | |
837 | ||
838 | #define for_each_bucket(b, ca) \ | |
839 | for (b = (ca)->buckets + (ca)->sb.first_bucket; \ | |
840 | b < (ca)->buckets + (ca)->sb.nbuckets; b++) | |
841 | ||
842 | static inline void cached_dev_put(struct cached_dev *dc) |
843 | { | |
844 | if (atomic_dec_and_test(&dc->count)) | |
845 | schedule_work(&dc->detach); | |
846 | } | |
847 | ||
848 | static inline bool cached_dev_get(struct cached_dev *dc) | |
849 | { | |
850 | if (!atomic_inc_not_zero(&dc->count)) | |
851 | return false; | |
852 | ||
853 | /* Paired with the mb in cached_dev_attach */ | |
854 | smp_mb__after_atomic_inc(); | |
855 | return true; | |
856 | } | |
857 | ||
858 | /* | |
859 | * bucket_gc_gen() returns the difference between the bucket's current gen and | |
860 | * the oldest gen of any pointer into that bucket in the btree (last_gc). | |
861 | * | |
862 | * bucket_disk_gen() returns the difference between the current gen and the gen | |
863 | * on disk; they're both used to make sure gens don't wrap around. | |
864 | */ | |
865 | ||
866 | static inline uint8_t bucket_gc_gen(struct bucket *b) | |
867 | { | |
868 | return b->gen - b->last_gc; | |
869 | } | |
870 | ||
871 | static inline uint8_t bucket_disk_gen(struct bucket *b) | |
872 | { | |
873 | return b->gen - b->disk_gen; | |
874 | } | |
875 | ||
876 | #define BUCKET_GC_GEN_MAX 96U | |
877 | #define BUCKET_DISK_GEN_MAX 64U | |
878 | ||
879 | #define kobj_attribute_write(n, fn) \ | |
880 | static struct kobj_attribute ksysfs_##n = __ATTR(n, S_IWUSR, NULL, fn) | |
881 | ||
882 | #define kobj_attribute_rw(n, show, store) \ | |
883 | static struct kobj_attribute ksysfs_##n = \ | |
884 | __ATTR(n, S_IWUSR|S_IRUSR, show, store) | |
885 | ||
886 | static inline void wake_up_allocators(struct cache_set *c) |
887 | { | |
888 | struct cache *ca; | |
889 | unsigned i; | |
890 | ||
891 | for_each_cache(ca, c, i) | |
892 | wake_up_process(ca->alloc_thread); | |
893 | } | |
894 | ||
895 | /* Forward declarations */ |
896 | ||
897 | void bch_count_io_errors(struct cache *, int, const char *); |
898 | void bch_bbio_count_io_errors(struct cache_set *, struct bio *, | |
899 | int, const char *); | |
900 | void bch_bbio_endio(struct cache_set *, struct bio *, int, const char *); | |
901 | void bch_bbio_free(struct bio *, struct cache_set *); | |
902 | struct bio *bch_bbio_alloc(struct cache_set *); | |
903 | ||
904 | void bch_generic_make_request(struct bio *, struct bio_split_pool *); |
905 | void __bch_submit_bbio(struct bio *, struct cache_set *); | |
906 | void bch_submit_bbio(struct bio *, struct cache_set *, struct bkey *, unsigned); | |
907 | ||
908 | uint8_t bch_inc_gen(struct cache *, struct bucket *); | |
909 | void bch_rescale_priorities(struct cache_set *, int); | |
910 | bool bch_bucket_add_unused(struct cache *, struct bucket *); | |
cafe5635 | 911 | |
35fcd848 | 912 | long bch_bucket_alloc(struct cache *, unsigned, bool); |
913 | void bch_bucket_free(struct cache_set *, struct bkey *); |
914 | ||
915 | int __bch_bucket_alloc_set(struct cache_set *, unsigned, | |
35fcd848 | 916 | struct bkey *, int, bool); |
cafe5635 | 917 | int bch_bucket_alloc_set(struct cache_set *, unsigned, |
35fcd848 | 918 | struct bkey *, int, bool); |
919 | bool bch_alloc_sectors(struct cache_set *, struct bkey *, unsigned, |
920 | unsigned, unsigned, bool); | |
921 | |
922 | __printf(2, 3) | |
923 | bool bch_cache_set_error(struct cache_set *, const char *, ...); | |
924 | ||
925 | void bch_prio_write(struct cache *); | |
926 | void bch_write_bdev_super(struct cached_dev *, struct closure *); | |
927 | ||
72a44517 | 928 | extern struct workqueue_struct *bcache_wq; |
929 | extern const char * const bch_cache_modes[]; |
930 | extern struct mutex bch_register_lock; | |
931 | extern struct list_head bch_cache_sets; | |
932 | ||
933 | extern struct kobj_type bch_cached_dev_ktype; | |
934 | extern struct kobj_type bch_flash_dev_ktype; | |
935 | extern struct kobj_type bch_cache_set_ktype; | |
936 | extern struct kobj_type bch_cache_set_internal_ktype; | |
937 | extern struct kobj_type bch_cache_ktype; | |
938 | ||
939 | void bch_cached_dev_release(struct kobject *); | |
940 | void bch_flash_dev_release(struct kobject *); | |
941 | void bch_cache_set_release(struct kobject *); | |
942 | void bch_cache_release(struct kobject *); | |
943 | ||
944 | int bch_uuid_write(struct cache_set *); | |
945 | void bcache_write_super(struct cache_set *); | |
946 | ||
947 | int bch_flash_dev_create(struct cache_set *c, uint64_t size); | |
948 | ||
949 | int bch_cached_dev_attach(struct cached_dev *, struct cache_set *); | |
950 | void bch_cached_dev_detach(struct cached_dev *); | |
951 | void bch_cached_dev_run(struct cached_dev *); | |
952 | void bcache_device_stop(struct bcache_device *); | |
953 | ||
954 | void bch_cache_set_unregister(struct cache_set *); | |
955 | void bch_cache_set_stop(struct cache_set *); | |
956 | ||
957 | struct cache_set *bch_cache_set_alloc(struct cache_sb *); | |
958 | void bch_btree_cache_free(struct cache_set *); | |
959 | int bch_btree_cache_alloc(struct cache_set *); | |
cafe5635 | 960 | void bch_moving_init_cache_set(struct cache_set *); |
961 | int bch_open_buckets_alloc(struct cache_set *); |
962 | void bch_open_buckets_free(struct cache_set *); | |
cafe5635 | 963 | |
119ba0f8 | 964 | int bch_cache_allocator_start(struct cache *ca); |
965 | int bch_cache_allocator_init(struct cache *ca); |
966 | ||
967 | void bch_debug_exit(void); | |
968 | int bch_debug_init(struct kobject *); | |
969 | void bch_request_exit(void); |
970 | int bch_request_init(void); | |
971 | void bch_btree_exit(void); | |
972 | int bch_btree_init(void); | |
973 | ||
974 | #endif /* _BCACHE_H */ |