// SPDX-License-Identifier: GPL-2.0
/*
 * bcache setup/teardown code, and some metadata io - read a superblock and
 * figure out what to do with it.
 *
 * Copyright 2010, 2011 Kent Overstreet <kent.overstreet@gmail.com>
 * Copyright 2012 Google, Inc.
 */

#include "bcache.h"
#include "btree.h"
#include "debug.h"
#include "extents.h"
#include "request.h"
#include "writeback.h"

#include <linux/blkdev.h>
#include <linux/buffer_head.h>
#include <linux/debugfs.h>
#include <linux/genhd.h>
#include <linux/idr.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/reboot.h>
#include <linux/sysfs.h>

unsigned int bch_cutoff_writeback;
unsigned int bch_cutoff_writeback_sync;

static const char bcache_magic[] = {
	0xc6, 0x85, 0x73, 0xf6, 0x4e, 0x1a, 0x45, 0xca,
	0x82, 0x65, 0xf5, 0x7f, 0x48, 0xba, 0x6d, 0x81
};

static const char invalid_uuid[] = {
	0xa0, 0x3e, 0xf8, 0xed, 0x3e, 0xe1, 0xb8, 0x78,
	0xc8, 0x50, 0xfc, 0x5e, 0xcb, 0x16, 0xcd, 0x99
};

static struct kobject *bcache_kobj;
struct mutex bch_register_lock;
LIST_HEAD(bch_cache_sets);
static LIST_HEAD(uncached_devices);

static int bcache_major;
static DEFINE_IDA(bcache_device_idx);
static wait_queue_head_t unregister_wait;
struct workqueue_struct *bcache_wq;
struct workqueue_struct *bch_journal_wq;

#define BTREE_MAX_PAGES		(256 * 1024 / PAGE_SIZE)
/* limitation of partitions number on single bcache device */
#define BCACHE_MINORS		128
/* limitation of bcache devices number on single system */
#define BCACHE_DEVICE_IDX_MAX	((1U << MINORBITS)/BCACHE_MINORS)

/* Superblock */
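/*
 * Read and validate the bcache superblock of @bdev. On success, decode the
 * fields into @sb, hand back an extra reference to the page holding the raw
 * on-disk superblock via @res, and return NULL; on failure, return a
 * human-readable error string.
 */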
static const char *read_super(struct cache_sb *sb, struct block_device *bdev,
			      struct page **res)
{
	const char *err;
	struct cache_sb *s;
	struct buffer_head *bh = __bread(bdev, 1, SB_SIZE);
	unsigned int i;

	if (!bh)
		return "IO error";

	s = (struct cache_sb *) bh->b_data;

	sb->offset = le64_to_cpu(s->offset);
	sb->version = le64_to_cpu(s->version);

	memcpy(sb->magic, s->magic, 16);
	memcpy(sb->uuid, s->uuid, 16);
	memcpy(sb->set_uuid, s->set_uuid, 16);
	memcpy(sb->label, s->label, SB_LABEL_SIZE);

	sb->flags = le64_to_cpu(s->flags);
	sb->seq = le64_to_cpu(s->seq);
	sb->last_mount = le32_to_cpu(s->last_mount);
	sb->first_bucket = le16_to_cpu(s->first_bucket);
	sb->keys = le16_to_cpu(s->keys);

	for (i = 0; i < SB_JOURNAL_BUCKETS; i++)
		sb->d[i] = le64_to_cpu(s->d[i]);

	pr_debug("read sb version %llu, flags %llu, seq %llu, journal size %u",
		 sb->version, sb->flags, sb->seq, sb->keys);

	err = "Not a bcache superblock";
	if (sb->offset != SB_SECTOR)
		goto err;

	if (memcmp(sb->magic, bcache_magic, 16))
		goto err;

	err = "Too many journal buckets";
	if (sb->keys > SB_JOURNAL_BUCKETS)
		goto err;

	err = "Bad checksum";
	if (s->csum != csum_set(s))
		goto err;

	err = "Bad UUID";
	if (bch_is_zero(sb->uuid, 16))
		goto err;

	sb->block_size = le16_to_cpu(s->block_size);

	err = "Superblock block size smaller than device block size";
	if (sb->block_size << 9 < bdev_logical_block_size(bdev))
		goto err;

	switch (sb->version) {
	case BCACHE_SB_VERSION_BDEV:
		sb->data_offset = BDEV_DATA_START_DEFAULT;
		break;
	case BCACHE_SB_VERSION_BDEV_WITH_OFFSET:
		sb->data_offset = le64_to_cpu(s->data_offset);

		err = "Bad data offset";
		if (sb->data_offset < BDEV_DATA_START_DEFAULT)
			goto err;

		break;
	case BCACHE_SB_VERSION_CDEV:
	case BCACHE_SB_VERSION_CDEV_WITH_UUID:
		sb->nbuckets = le64_to_cpu(s->nbuckets);
		sb->bucket_size = le16_to_cpu(s->bucket_size);

		sb->nr_in_set = le16_to_cpu(s->nr_in_set);
		sb->nr_this_dev = le16_to_cpu(s->nr_this_dev);

		err = "Too many buckets";
		if (sb->nbuckets > LONG_MAX)
			goto err;

		err = "Not enough buckets";
		if (sb->nbuckets < 1 << 7)
			goto err;

		err = "Bad block/bucket size";
		if (!is_power_of_2(sb->block_size) ||
		    sb->block_size > PAGE_SECTORS ||
		    !is_power_of_2(sb->bucket_size) ||
		    sb->bucket_size < PAGE_SECTORS)
			goto err;

		err = "Invalid superblock: device too small";
		if (get_capacity(bdev->bd_disk) <
		    sb->bucket_size * sb->nbuckets)
			goto err;

		err = "Bad UUID";
		if (bch_is_zero(sb->set_uuid, 16))
			goto err;

		err = "Bad cache device number in set";
		if (!sb->nr_in_set ||
		    sb->nr_in_set <= sb->nr_this_dev ||
		    sb->nr_in_set > MAX_CACHES_PER_SET)
			goto err;

		err = "Journal buckets not sequential";
		for (i = 0; i < sb->keys; i++)
			if (sb->d[i] != sb->first_bucket + i)
				goto err;

		err = "Too many journal buckets";
		if (sb->first_bucket + sb->keys > sb->nbuckets)
			goto err;

		err = "Invalid superblock: first bucket comes before end of super";
		if (sb->first_bucket * sb->bucket_size < 16)
			goto err;

		break;
	default:
		err = "Unsupported superblock version";
		goto err;
	}

	sb->last_mount = (u32)ktime_get_real_seconds();
	err = NULL;

	get_page(bh->b_page);
	*res = bh->b_page;
err:
	put_bh(bh);
	return err;
}

static void write_bdev_super_endio(struct bio *bio)
{
	struct cached_dev *dc = bio->bi_private;
	/* XXX: error checking */

	closure_put(&dc->sb_write);
}

static void __write_super(struct cache_sb *sb, struct bio *bio)
{
	struct cache_sb *out = page_address(bio_first_page_all(bio));
	unsigned int i;

	bio->bi_iter.bi_sector = SB_SECTOR;
	bio->bi_iter.bi_size = SB_SIZE;
	bio_set_op_attrs(bio, REQ_OP_WRITE, REQ_SYNC|REQ_META);
	bch_bio_map(bio, NULL);

	out->offset = cpu_to_le64(sb->offset);
	out->version = cpu_to_le64(sb->version);

	memcpy(out->uuid, sb->uuid, 16);
	memcpy(out->set_uuid, sb->set_uuid, 16);
	memcpy(out->label, sb->label, SB_LABEL_SIZE);

	out->flags = cpu_to_le64(sb->flags);
	out->seq = cpu_to_le64(sb->seq);

	out->last_mount = cpu_to_le32(sb->last_mount);
	out->first_bucket = cpu_to_le16(sb->first_bucket);
	out->keys = cpu_to_le16(sb->keys);

	for (i = 0; i < sb->keys; i++)
		out->d[i] = cpu_to_le64(sb->d[i]);

	out->csum = csum_set(out);

	pr_debug("ver %llu, flags %llu, seq %llu",
		 sb->version, sb->flags, sb->seq);

	submit_bio(bio);
}
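/*
 * Superblock writes are serialized with a semaphore rather than a plain
 * mutex because completion happens in bio endio context: the submitter
 * down()s sb_write_mutex, and the closure's destructor up()s it once the
 * write finishes. The cache set superblock and uuid writes below use the
 * same pattern.
 */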
static void bch_write_bdev_super_unlock(struct closure *cl)
{
	struct cached_dev *dc = container_of(cl, struct cached_dev, sb_write);

	up(&dc->sb_write_mutex);
}

void bch_write_bdev_super(struct cached_dev *dc, struct closure *parent)
{
	struct closure *cl = &dc->sb_write;
	struct bio *bio = &dc->sb_bio;

	down(&dc->sb_write_mutex);
	closure_init(cl, parent);

	bio_reset(bio);
	bio_set_dev(bio, dc->bdev);
	bio->bi_end_io = write_bdev_super_endio;
	bio->bi_private = dc;

	closure_get(cl);
	/* I/O request sent to backing device */
	__write_super(&dc->sb, bio);

	closure_return_with_destructor(cl, bch_write_bdev_super_unlock);
}

static void write_super_endio(struct bio *bio)
{
	struct cache *ca = bio->bi_private;

	/* is_read = 0 */
	bch_count_io_errors(ca, bio->bi_status, 0,
			    "writing superblock");
	closure_put(&ca->set->sb_write);
}

static void bcache_write_super_unlock(struct closure *cl)
{
	struct cache_set *c = container_of(cl, struct cache_set, sb_write);

	up(&c->sb_write_mutex);
}

void bcache_write_super(struct cache_set *c)
{
	struct closure *cl = &c->sb_write;
	struct cache *ca;
	unsigned int i;

	down(&c->sb_write_mutex);
	closure_init(cl, &c->cl);

	c->sb.seq++;

	for_each_cache(ca, c, i) {
		struct bio *bio = &ca->sb_bio;

		ca->sb.version = BCACHE_SB_VERSION_CDEV_WITH_UUID;
		ca->sb.seq = c->sb.seq;
		ca->sb.last_mount = c->sb.last_mount;

		SET_CACHE_SYNC(&ca->sb, CACHE_SYNC(&c->sb));

		bio_reset(bio);
		bio_set_dev(bio, ca->bdev);
		bio->bi_end_io = write_super_endio;
		bio->bi_private = ca;

		closure_get(cl);
		__write_super(&ca->sb, bio);
	}

	closure_return_with_destructor(cl, bcache_write_super_unlock);
}

/* UUID io */
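/*
 * Every bcache device is identified by a 16-byte UUID; the cache set keeps
 * an array of struct uuid_entry (c->uuids), one slot per device, persisted
 * in its own bucket so devices can be matched back to their set on
 * registration.
 */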
static void uuid_endio(struct bio *bio)
{
	struct closure *cl = bio->bi_private;
	struct cache_set *c = container_of(cl, struct cache_set, uuid_write);

	cache_set_err_on(bio->bi_status, c, "accessing uuids");
	bch_bbio_free(bio, c);
	closure_put(cl);
}

static void uuid_io_unlock(struct closure *cl)
{
	struct cache_set *c = container_of(cl, struct cache_set, uuid_write);

	up(&c->uuid_write_mutex);
}

static void uuid_io(struct cache_set *c, int op, unsigned long op_flags,
		    struct bkey *k, struct closure *parent)
{
	struct closure *cl = &c->uuid_write;
	struct uuid_entry *u;
	unsigned int i;
	char buf[80];

	BUG_ON(!parent);
	down(&c->uuid_write_mutex);
	closure_init(cl, parent);

	for (i = 0; i < KEY_PTRS(k); i++) {
		struct bio *bio = bch_bbio_alloc(c);

		bio->bi_opf = REQ_SYNC | REQ_META | op_flags;
		bio->bi_iter.bi_size = KEY_SIZE(k) << 9;

		bio->bi_end_io = uuid_endio;
		bio->bi_private = cl;
		bio_set_op_attrs(bio, op, REQ_SYNC|REQ_META|op_flags);
		bch_bio_map(bio, c->uuids);

		bch_submit_bbio(bio, c, k, i);

		if (op != REQ_OP_WRITE)
			break;
	}

	bch_extent_to_text(buf, sizeof(buf), k);
	pr_debug("%s UUIDs at %s", op == REQ_OP_WRITE ? "wrote" : "read", buf);

	for (u = c->uuids; u < c->uuids + c->nr_uuids; u++)
		if (!bch_is_zero(u->uuid, 16))
			pr_debug("Slot %zi: %pU: %s: 1st: %u last: %u inv: %u",
				 u - c->uuids, u->uuid, u->label,
				 u->first_reg, u->last_reg, u->invalidated);

	closure_return_with_destructor(cl, uuid_io_unlock);
}

static char *uuid_read(struct cache_set *c, struct jset *j, struct closure *cl)
{
	struct bkey *k = &j->uuid_bucket;

	if (__bch_btree_ptr_invalid(c, k))
		return "bad uuid pointer";

	bkey_copy(&c->uuid_bucket, k);
	uuid_io(c, REQ_OP_READ, 0, k, cl);

	if (j->version < BCACHE_JSET_VERSION_UUIDv1) {
		struct uuid_entry_v0 *u0 = (void *) c->uuids;
		struct uuid_entry *u1 = (void *) c->uuids;
		int i;

		closure_sync(cl);

		/*
		 * Since the new uuid entry is bigger than the old, we have to
		 * convert starting at the highest memory address and work down
		 * in order to do it in place
		 */

		for (i = c->nr_uuids - 1;
		     i >= 0;
		     --i) {
			memcpy(u1[i].uuid, u0[i].uuid, 16);
			memcpy(u1[i].label, u0[i].label, 32);

			u1[i].first_reg = u0[i].first_reg;
			u1[i].last_reg = u0[i].last_reg;
			u1[i].invalidated = u0[i].invalidated;

			u1[i].flags = 0;
			u1[i].sectors = 0;
		}
	}

	return NULL;
}

static int __uuid_write(struct cache_set *c)
{
	BKEY_PADDED(key) k;
	struct closure cl;
	struct cache *ca;

	closure_init_stack(&cl);
	lockdep_assert_held(&bch_register_lock);

	if (bch_bucket_alloc_set(c, RESERVE_BTREE, &k.key, 1, true))
		return 1;

	SET_KEY_SIZE(&k.key, c->sb.bucket_size);
	uuid_io(c, REQ_OP_WRITE, 0, &k.key, &cl);
	closure_sync(&cl);

	/* Only one bucket used for uuid write */
	ca = PTR_CACHE(c, &k.key, 0);
	atomic_long_add(ca->sb.bucket_size, &ca->meta_sectors_written);

	bkey_copy(&c->uuid_bucket, &k.key);
	bkey_put(c, &k.key);
	return 0;
}

int bch_uuid_write(struct cache_set *c)
{
	int ret = __uuid_write(c);

	if (!ret)
		bch_journal_meta(c, NULL);

	return ret;
}

static struct uuid_entry *uuid_find(struct cache_set *c, const char *uuid)
{
	struct uuid_entry *u;

	for (u = c->uuids;
	     u < c->uuids + c->nr_uuids; u++)
		if (!memcmp(u->uuid, uuid, 16))
			return u;

	return NULL;
}

static struct uuid_entry *uuid_find_empty(struct cache_set *c)
{
	static const char zero_uuid[16] = "\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0";

	return uuid_find(c, zero_uuid);
}

/*
 * Bucket priorities/gens:
 *
 * For each bucket, we store on disk its
 *    8 bit gen
 *   16 bit priority
 *
 * See alloc.c for an explanation of the gen. The priority is used to implement
 * lru (and in the future other) cache replacement policies; for most purposes
 * it's just an opaque integer.
 *
 * The gens and the priorities don't have a whole lot to do with each other, and
 * it's actually the gens that must be written out at specific times - it's no
 * big deal if the priorities don't get written, if we lose them we just reuse
 * buckets in suboptimal order.
 *
 * On disk they're stored in a packed array, and in as many buckets are required
 * to fit them all. The buckets we use to store them form a list; the journal
 * header points to the first bucket, the first bucket points to the second
 * bucket, et cetera.
 *
 * This code is used by the allocation code; periodically (whenever it runs out
 * of buckets to allocate from) the allocation code will invalidate some
 * buckets, but it can't use those buckets until their new gens are safely on
 * disk.
 */
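/*
 * Concretely, each prio bucket holds one struct prio_set: a csum/magic/seq
 * header, a next_bucket pointer chaining to the next prio bucket, and a
 * packed data[] array of { prio (le16), gen (u8) } entries, prios_per_bucket()
 * of them per bucket. bch_prio_write() below builds and writes that chain;
 * prio_read() walks it back in.
 */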
static void prio_endio(struct bio *bio)
{
	struct cache *ca = bio->bi_private;

	cache_set_err_on(bio->bi_status, ca->set, "accessing priorities");
	bch_bbio_free(bio, ca->set);
	closure_put(&ca->prio);
}

static void prio_io(struct cache *ca, uint64_t bucket, int op,
		    unsigned long op_flags)
{
	struct closure *cl = &ca->prio;
	struct bio *bio = bch_bbio_alloc(ca->set);

	closure_init_stack(cl);

	bio->bi_iter.bi_sector = bucket * ca->sb.bucket_size;
	bio_set_dev(bio, ca->bdev);
	bio->bi_iter.bi_size = bucket_bytes(ca);

	bio->bi_end_io = prio_endio;
	bio->bi_private = ca;
	bio_set_op_attrs(bio, op, REQ_SYNC|REQ_META|op_flags);
	bch_bio_map(bio, ca->disk_buckets);

	closure_bio_submit(ca->set, bio, &ca->prio);
	closure_sync(cl);
}

void bch_prio_write(struct cache *ca)
{
	int i;
	struct bucket *b;
	struct closure cl;

	closure_init_stack(&cl);

	lockdep_assert_held(&ca->set->bucket_lock);

	ca->disk_buckets->seq++;

	atomic_long_add(ca->sb.bucket_size * prio_buckets(ca),
			&ca->meta_sectors_written);

	//pr_debug("free %zu, free_inc %zu, unused %zu", fifo_used(&ca->free),
	//	 fifo_used(&ca->free_inc), fifo_used(&ca->unused));

	for (i = prio_buckets(ca) - 1; i >= 0; --i) {
		long bucket;
		struct prio_set *p = ca->disk_buckets;
		struct bucket_disk *d = p->data;
		struct bucket_disk *end = d + prios_per_bucket(ca);

		for (b = ca->buckets + i * prios_per_bucket(ca);
		     b < ca->buckets + ca->sb.nbuckets && d < end;
		     b++, d++) {
			d->prio = cpu_to_le16(b->prio);
			d->gen = b->gen;
		}

		p->next_bucket = ca->prio_buckets[i + 1];
		p->magic = pset_magic(&ca->sb);
		p->csum = bch_crc64(&p->magic, bucket_bytes(ca) - 8);

		bucket = bch_bucket_alloc(ca, RESERVE_PRIO, true);
		BUG_ON(bucket == -1);

		mutex_unlock(&ca->set->bucket_lock);
		prio_io(ca, bucket, REQ_OP_WRITE, 0);
		mutex_lock(&ca->set->bucket_lock);

		ca->prio_buckets[i] = bucket;
		atomic_dec_bug(&ca->buckets[bucket].pin);
	}

	mutex_unlock(&ca->set->bucket_lock);

	bch_journal_meta(ca->set, &cl);
	closure_sync(&cl);

	mutex_lock(&ca->set->bucket_lock);

	/*
	 * Don't want the old priorities to get garbage collected until after we
	 * finish writing the new ones, and they're journalled
	 */
	for (i = 0; i < prio_buckets(ca); i++) {
		if (ca->prio_last_buckets[i])
			__bch_bucket_free(ca,
				&ca->buckets[ca->prio_last_buckets[i]]);

		ca->prio_last_buckets[i] = ca->prio_buckets[i];
	}
}
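/*
 * Inverse of bch_prio_write(): walk every in-memory bucket, refilling its
 * prio/gen from the on-disk chain. @bucket names the first prio bucket
 * (taken from the journal); each time the current prio_set is exhausted,
 * the next one is read via p->next_bucket and its csum and magic checked.
 */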
static void prio_read(struct cache *ca, uint64_t bucket)
{
	struct prio_set *p = ca->disk_buckets;
	struct bucket_disk *d = p->data + prios_per_bucket(ca), *end = d;
	struct bucket *b;
	unsigned int bucket_nr = 0;

	for (b = ca->buckets;
	     b < ca->buckets + ca->sb.nbuckets;
	     b++, d++) {
		if (d == end) {
			ca->prio_buckets[bucket_nr] = bucket;
			ca->prio_last_buckets[bucket_nr] = bucket;
			bucket_nr++;

			prio_io(ca, bucket, REQ_OP_READ, 0);

			if (p->csum !=
			    bch_crc64(&p->magic, bucket_bytes(ca) - 8))
				pr_warn("bad csum reading priorities");

			if (p->magic != pset_magic(&ca->sb))
				pr_warn("bad magic reading priorities");

			bucket = p->next_bucket;
			d = p->data;
		}

		b->prio = le16_to_cpu(d->prio);
		b->gen = b->last_gc = d->gen;
	}
}

/* Bcache device */

static int open_dev(struct block_device *b, fmode_t mode)
{
	struct bcache_device *d = b->bd_disk->private_data;

	if (test_bit(BCACHE_DEV_CLOSING, &d->flags))
		return -ENXIO;

	closure_get(&d->cl);
	return 0;
}

static void release_dev(struct gendisk *b, fmode_t mode)
{
	struct bcache_device *d = b->private_data;

	closure_put(&d->cl);
}

static int ioctl_dev(struct block_device *b, fmode_t mode,
		     unsigned int cmd, unsigned long arg)
{
	struct bcache_device *d = b->bd_disk->private_data;

	return d->ioctl(d, mode, cmd, arg);
}

static const struct block_device_operations bcache_ops = {
	.open		= open_dev,
	.release	= release_dev,
	.ioctl		= ioctl_dev,
	.owner		= THIS_MODULE,
};

void bcache_device_stop(struct bcache_device *d)
{
	if (!test_and_set_bit(BCACHE_DEV_CLOSING, &d->flags))
		closure_queue(&d->cl);
}

static void bcache_device_unlink(struct bcache_device *d)
{
	lockdep_assert_held(&bch_register_lock);

	if (d->c && !test_and_set_bit(BCACHE_DEV_UNLINK_DONE, &d->flags)) {
		unsigned int i;
		struct cache *ca;

		sysfs_remove_link(&d->c->kobj, d->name);
		sysfs_remove_link(&d->kobj, "cache");

		for_each_cache(ca, d->c, i)
			bd_unlink_disk_holder(ca->bdev, d->disk);
	}
}

static void bcache_device_link(struct bcache_device *d, struct cache_set *c,
			       const char *name)
{
	unsigned int i;
	struct cache *ca;

	for_each_cache(ca, d->c, i)
		bd_link_disk_holder(ca->bdev, d->disk);

	snprintf(d->name, BCACHEDEVNAME_SIZE,
		 "%s%u", name, d->id);

	WARN(sysfs_create_link(&d->kobj, &c->kobj, "cache") ||
	     sysfs_create_link(&c->kobj, &d->kobj, d->name),
	     "Couldn't create device <-> cache set symlinks");

	clear_bit(BCACHE_DEV_UNLINK_DONE, &d->flags);
}

static void bcache_device_detach(struct bcache_device *d)
{
	lockdep_assert_held(&bch_register_lock);

	atomic_dec(&d->c->attached_dev_nr);

	if (test_bit(BCACHE_DEV_DETACHING, &d->flags)) {
		struct uuid_entry *u = d->c->uuids + d->id;

		SET_UUID_FLASH_ONLY(u, 0);
		memcpy(u->uuid, invalid_uuid, 16);
		u->invalidated = cpu_to_le32((u32)ktime_get_real_seconds());
		bch_uuid_write(d->c);
	}

	bcache_device_unlink(d);

	d->c->devices[d->id] = NULL;
	closure_put(&d->c->caching);
	d->c = NULL;
}

static void bcache_device_attach(struct bcache_device *d, struct cache_set *c,
				 unsigned int id)
{
	d->id = id;
	d->c = c;
	c->devices[id] = d;

	if (id >= c->devices_max_used)
		c->devices_max_used = id + 1;

	closure_get(&c->caching);
}

static inline int first_minor_to_idx(int first_minor)
{
	return (first_minor/BCACHE_MINORS);
}

static inline int idx_to_first_minor(int idx)
{
	return (idx * BCACHE_MINORS);
}
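/*
 * Each bcache device idx owns a contiguous block of BCACHE_MINORS (128)
 * minor numbers, so with the helpers above idx 0 maps to first_minor 0
 * (minors 0-127 for bcache0 and its partitions), idx 1 to first_minor 128
 * (bcache1), and so on, up to BCACHE_DEVICE_IDX_MAX devices.
 */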
static void bcache_device_free(struct bcache_device *d)
{
	lockdep_assert_held(&bch_register_lock);

	pr_info("%s stopped", d->disk->disk_name);

	if (d->c)
		bcache_device_detach(d);
	if (d->disk && d->disk->flags & GENHD_FL_UP)
		del_gendisk(d->disk);
	if (d->disk && d->disk->queue)
		blk_cleanup_queue(d->disk->queue);
	if (d->disk) {
		ida_simple_remove(&bcache_device_idx,
				  first_minor_to_idx(d->disk->first_minor));
		put_disk(d->disk);
	}

	bioset_exit(&d->bio_split);
	kvfree(d->full_dirty_stripes);
	kvfree(d->stripe_sectors_dirty);

	closure_debug_destroy(&d->cl);
}

static int bcache_device_init(struct bcache_device *d, unsigned int block_size,
			      sector_t sectors)
{
	struct request_queue *q;
	const size_t max_stripes = min_t(size_t, INT_MAX,
					 SIZE_MAX / sizeof(atomic_t));
	size_t n;
	int idx;

	if (!d->stripe_size)
		d->stripe_size = 1 << 31;

	d->nr_stripes = DIV_ROUND_UP_ULL(sectors, d->stripe_size);

	if (!d->nr_stripes || d->nr_stripes > max_stripes) {
		pr_err("nr_stripes too large or invalid: %u (start sector beyond end of disk?)",
		       (unsigned int)d->nr_stripes);
		return -ENOMEM;
	}

	n = d->nr_stripes * sizeof(atomic_t);
	d->stripe_sectors_dirty = kvzalloc(n, GFP_KERNEL);
	if (!d->stripe_sectors_dirty)
		return -ENOMEM;

	n = BITS_TO_LONGS(d->nr_stripes) * sizeof(unsigned long);
	d->full_dirty_stripes = kvzalloc(n, GFP_KERNEL);
	if (!d->full_dirty_stripes)
		return -ENOMEM;

	idx = ida_simple_get(&bcache_device_idx, 0,
				BCACHE_DEVICE_IDX_MAX, GFP_KERNEL);
	if (idx < 0)
		return idx;

	if (bioset_init(&d->bio_split, 4, offsetof(struct bbio, bio),
			BIOSET_NEED_BVECS|BIOSET_NEED_RESCUER))
		goto err;

	d->disk = alloc_disk(BCACHE_MINORS);
	if (!d->disk)
		goto err;

	set_capacity(d->disk, sectors);
	snprintf(d->disk->disk_name, DISK_NAME_LEN, "bcache%i", idx);

	d->disk->major		= bcache_major;
	d->disk->first_minor	= idx_to_first_minor(idx);
	d->disk->fops		= &bcache_ops;
	d->disk->private_data	= d;

	q = blk_alloc_queue(GFP_KERNEL);
	if (!q)
		return -ENOMEM;

	blk_queue_make_request(q, NULL);
	d->disk->queue			= q;
	q->queuedata			= d;
	q->backing_dev_info->congested_data = d;
	q->limits.max_hw_sectors	= UINT_MAX;
	q->limits.max_sectors		= UINT_MAX;
	q->limits.max_segment_size	= UINT_MAX;
	q->limits.max_segments		= BIO_MAX_PAGES;
	blk_queue_max_discard_sectors(q, UINT_MAX);
	q->limits.discard_granularity	= 512;
	q->limits.io_min		= block_size;
	q->limits.logical_block_size	= block_size;
	q->limits.physical_block_size	= block_size;
	blk_queue_flag_set(QUEUE_FLAG_NONROT, d->disk->queue);
	blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, d->disk->queue);
	blk_queue_flag_set(QUEUE_FLAG_DISCARD, d->disk->queue);

	blk_queue_write_cache(q, true, true);

	return 0;

err:
	ida_simple_remove(&bcache_device_idx, idx);
	return -ENOMEM;
}

/* Cached device */

static void calc_cached_dev_sectors(struct cache_set *c)
{
	uint64_t sectors = 0;
	struct cached_dev *dc;

	list_for_each_entry(dc, &c->cached_devs, list)
		sectors += bdev_sectors(dc->bdev);

	c->cached_dev_sectors = sectors;
}
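/*
 * Per backing device kthread: once a second, check whether the backing
 * device's request queue is dying. After BACKING_DEV_OFFLINE_TIMEOUT
 * consecutive seconds offline, set dc->io_disable and stop the bcache
 * device rather than letting I/O pile up against a dead disk.
 */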
#define BACKING_DEV_OFFLINE_TIMEOUT 5
static int cached_dev_status_update(void *arg)
{
	struct cached_dev *dc = arg;
	struct request_queue *q;

	/*
	 * If this delayed worker is stopping outside, directly quit here.
	 * dc->io_disable might be set via sysfs interface, so check it
	 * here too.
	 */
	while (!kthread_should_stop() && !dc->io_disable) {
		q = bdev_get_queue(dc->bdev);
		if (blk_queue_dying(q))
			dc->offline_seconds++;
		else
			dc->offline_seconds = 0;

		if (dc->offline_seconds >= BACKING_DEV_OFFLINE_TIMEOUT) {
			pr_err("%s: device offline for %d seconds",
			       dc->backing_dev_name,
			       BACKING_DEV_OFFLINE_TIMEOUT);
			pr_err("%s: disable I/O request due to backing "
			       "device offline", dc->disk.name);
			dc->io_disable = true;
			/* let others know earlier that io_disable is true */
			smp_mb();
			bcache_device_stop(&dc->disk);
			break;
		}
		schedule_timeout_interruptible(HZ);
	}

	wait_for_kthread_stop();
	return 0;
}

void bch_cached_dev_run(struct cached_dev *dc)
{
	struct bcache_device *d = &dc->disk;
	char buf[SB_LABEL_SIZE + 1];
	char *env[] = {
		"DRIVER=bcache",
		kasprintf(GFP_KERNEL, "CACHED_UUID=%pU", dc->sb.uuid),
		NULL,
		NULL,
	};

	memcpy(buf, dc->sb.label, SB_LABEL_SIZE);
	buf[SB_LABEL_SIZE] = '\0';
	env[2] = kasprintf(GFP_KERNEL, "CACHED_LABEL=%s", buf);

	if (atomic_xchg(&dc->running, 1)) {
		kfree(env[1]);
		kfree(env[2]);
		return;
	}

	if (!d->c &&
	    BDEV_STATE(&dc->sb) != BDEV_STATE_NONE) {
		struct closure cl;

		closure_init_stack(&cl);

		SET_BDEV_STATE(&dc->sb, BDEV_STATE_STALE);
		bch_write_bdev_super(dc, &cl);
		closure_sync(&cl);
	}

	add_disk(d->disk);
	bd_link_disk_holder(dc->bdev, dc->disk.disk);
	/*
	 * won't show up in the uevent file, use udevadm monitor -e instead
	 * only class / kset properties are persistent
	 */
	kobject_uevent_env(&disk_to_dev(d->disk)->kobj, KOBJ_CHANGE, env);
	kfree(env[1]);
	kfree(env[2]);

	if (sysfs_create_link(&d->kobj, &disk_to_dev(d->disk)->kobj, "dev") ||
	    sysfs_create_link(&disk_to_dev(d->disk)->kobj, &d->kobj, "bcache"))
		pr_debug("error creating sysfs link");

	dc->status_update_thread = kthread_run(cached_dev_status_update,
					       dc, "bcache_status_update");
	if (IS_ERR(dc->status_update_thread)) {
		pr_warn("failed to create bcache_status_update kthread, "
			"continue to run without monitoring backing "
			"device status");
	}
}

/*
 * If BCACHE_DEV_RATE_DW_RUNNING is set, it means routine of the delayed
 * work dc->writeback_rate_update is running. Wait until the routine
 * quits (BCACHE_DEV_RATE_DW_RUNNING is clear), then continue to
 * cancel it. If BCACHE_DEV_RATE_DW_RUNNING is not clear after time_out
 * seconds, give up waiting here and continue to cancel it too.
 */
static void cancel_writeback_rate_update_dwork(struct cached_dev *dc)
{
	int time_out = WRITEBACK_RATE_UPDATE_SECS_MAX * HZ;

	do {
		if (!test_bit(BCACHE_DEV_RATE_DW_RUNNING,
			      &dc->disk.flags))
			break;
		time_out--;
		schedule_timeout_interruptible(1);
	} while (time_out > 0);

	if (time_out == 0)
		pr_warn("give up waiting for dc->writeback_write_update to quit");

	cancel_delayed_work_sync(&dc->writeback_rate_update);
}

static void cached_dev_detach_finish(struct work_struct *w)
{
	struct cached_dev *dc = container_of(w, struct cached_dev, detach);
	struct closure cl;

	closure_init_stack(&cl);

	BUG_ON(!test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags));
	BUG_ON(refcount_read(&dc->count));

	mutex_lock(&bch_register_lock);

	if (test_and_clear_bit(BCACHE_DEV_WB_RUNNING, &dc->disk.flags))
		cancel_writeback_rate_update_dwork(dc);

	if (!IS_ERR_OR_NULL(dc->writeback_thread)) {
		kthread_stop(dc->writeback_thread);
		dc->writeback_thread = NULL;
	}

	memset(&dc->sb.set_uuid, 0, 16);
	SET_BDEV_STATE(&dc->sb, BDEV_STATE_NONE);

	bch_write_bdev_super(dc, &cl);
	closure_sync(&cl);

	calc_cached_dev_sectors(dc->disk.c);
	bcache_device_detach(&dc->disk);
	list_move(&dc->list, &uncached_devices);

	clear_bit(BCACHE_DEV_DETACHING, &dc->disk.flags);
	clear_bit(BCACHE_DEV_UNLINK_DONE, &dc->disk.flags);

	mutex_unlock(&bch_register_lock);

	pr_info("Caching disabled for %s", dc->backing_dev_name);

	/* Drop ref we took in cached_dev_detach() */
	closure_put(&dc->disk.cl);
}

void bch_cached_dev_detach(struct cached_dev *dc)
{
	lockdep_assert_held(&bch_register_lock);

	if (test_bit(BCACHE_DEV_CLOSING, &dc->disk.flags))
		return;

	if (test_and_set_bit(BCACHE_DEV_DETACHING, &dc->disk.flags))
		return;

	/*
	 * Block the device from being closed and freed until we're finished
	 * detaching
	 */
	closure_get(&dc->disk.cl);

	bch_writeback_queue(dc);

	cached_dev_put(dc);
}
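/*
 * Attach a backing device to cache set @c. If @set_uuid is non-NULL it must
 * match the set's UUID; otherwise the UUID recorded in the backing device's
 * superblock must. A clean device gets a fresh uuid_entry slot; a dirty one
 * must find its old slot so its dirty data can still be written back.
 */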
int bch_cached_dev_attach(struct cached_dev *dc, struct cache_set *c,
			  uint8_t *set_uuid)
{
	uint32_t rtime = cpu_to_le32((u32)ktime_get_real_seconds());
	struct uuid_entry *u;
	struct cached_dev *exist_dc, *t;

	if ((set_uuid && memcmp(set_uuid, c->sb.set_uuid, 16)) ||
	    (!set_uuid && memcmp(dc->sb.set_uuid, c->sb.set_uuid, 16)))
		return -ENOENT;

	if (dc->disk.c) {
		pr_err("Can't attach %s: already attached",
		       dc->backing_dev_name);
		return -EINVAL;
	}

	if (test_bit(CACHE_SET_STOPPING, &c->flags)) {
		pr_err("Can't attach %s: shutting down",
		       dc->backing_dev_name);
		return -EINVAL;
	}

	if (dc->sb.block_size < c->sb.block_size) {
		/* Will die */
		pr_err("Couldn't attach %s: block size less than set's block size",
		       dc->backing_dev_name);
		return -EINVAL;
	}

	/* Check whether already attached */
	list_for_each_entry_safe(exist_dc, t, &c->cached_devs, list) {
		if (!memcmp(dc->sb.uuid, exist_dc->sb.uuid, 16)) {
			pr_err("Tried to attach %s but duplicate UUID already attached",
			       dc->backing_dev_name);

			return -EINVAL;
		}
	}

	u = uuid_find(c, dc->sb.uuid);

	if (u &&
	    (BDEV_STATE(&dc->sb) == BDEV_STATE_STALE ||
	     BDEV_STATE(&dc->sb) == BDEV_STATE_NONE)) {
		memcpy(u->uuid, invalid_uuid, 16);
		u->invalidated = cpu_to_le32((u32)ktime_get_real_seconds());
		u = NULL;
	}

	if (!u) {
		if (BDEV_STATE(&dc->sb) == BDEV_STATE_DIRTY) {
			pr_err("Couldn't find uuid for %s in set",
			       dc->backing_dev_name);
			return -ENOENT;
		}

		u = uuid_find_empty(c);
		if (!u) {
			pr_err("Not caching %s, no room for UUID",
			       dc->backing_dev_name);
			return -EINVAL;
		}
	}

	/*
	 * Deadlocks since we're called via sysfs...
	 * sysfs_remove_file(&dc->kobj, &sysfs_attach);
	 */

	if (bch_is_zero(u->uuid, 16)) {
		struct closure cl;

		closure_init_stack(&cl);

		memcpy(u->uuid, dc->sb.uuid, 16);
		memcpy(u->label, dc->sb.label, SB_LABEL_SIZE);
		u->first_reg = u->last_reg = rtime;
		bch_uuid_write(c);

		memcpy(dc->sb.set_uuid, c->sb.set_uuid, 16);
		SET_BDEV_STATE(&dc->sb, BDEV_STATE_CLEAN);

		bch_write_bdev_super(dc, &cl);
		closure_sync(&cl);
	} else {
		u->last_reg = rtime;
		bch_uuid_write(c);
	}

	bcache_device_attach(&dc->disk, c, u - c->uuids);
	list_move(&dc->list, &c->cached_devs);
	calc_cached_dev_sectors(c);

	/*
	 * dc->c must be set before dc->count != 0 - paired with the mb in
	 * cached_dev_get()
	 */
	smp_wmb();
	refcount_set(&dc->count, 1);

	/* Block writeback thread, but spawn it */
	down_write(&dc->writeback_lock);
	if (bch_cached_dev_writeback_start(dc)) {
		up_write(&dc->writeback_lock);
		return -ENOMEM;
	}

	if (BDEV_STATE(&dc->sb) == BDEV_STATE_DIRTY) {
		atomic_set(&dc->has_dirty, 1);
		bch_writeback_queue(dc);
	}

	bch_sectors_dirty_init(&dc->disk);

	bch_cached_dev_run(dc);
	bcache_device_link(&dc->disk, c, "bdev");
	atomic_inc(&c->attached_dev_nr);

	/* Allow the writeback thread to proceed */
	up_write(&dc->writeback_lock);

	pr_info("Caching %s as %s on set %pU",
		dc->backing_dev_name,
		dc->disk.disk->disk_name,
		dc->disk.c->sb.set_uuid);
	return 0;
}

void bch_cached_dev_release(struct kobject *kobj)
{
	struct cached_dev *dc = container_of(kobj, struct cached_dev,
					     disk.kobj);
	kfree(dc);
	module_put(THIS_MODULE);
}

static void cached_dev_free(struct closure *cl)
{
	struct cached_dev *dc = container_of(cl, struct cached_dev, disk.cl);

	mutex_lock(&bch_register_lock);

	if (test_and_clear_bit(BCACHE_DEV_WB_RUNNING, &dc->disk.flags))
		cancel_writeback_rate_update_dwork(dc);

	if (!IS_ERR_OR_NULL(dc->writeback_thread))
		kthread_stop(dc->writeback_thread);
	if (dc->writeback_write_wq)
		destroy_workqueue(dc->writeback_write_wq);
	if (!IS_ERR_OR_NULL(dc->status_update_thread))
		kthread_stop(dc->status_update_thread);

	if (atomic_read(&dc->running))
		bd_unlink_disk_holder(dc->bdev, dc->disk.disk);
	bcache_device_free(&dc->disk);
	list_del(&dc->list);

	mutex_unlock(&bch_register_lock);

	if (!IS_ERR_OR_NULL(dc->bdev))
		blkdev_put(dc->bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);

	wake_up(&unregister_wait);

	kobject_put(&dc->disk.kobj);
}

static void cached_dev_flush(struct closure *cl)
{
	struct cached_dev *dc = container_of(cl, struct cached_dev, disk.cl);
	struct bcache_device *d = &dc->disk;

	mutex_lock(&bch_register_lock);
	bcache_device_unlink(d);
	mutex_unlock(&bch_register_lock);

	bch_cache_accounting_destroy(&dc->accounting);
	kobject_del(&d->kobj);

	continue_at(cl, cached_dev_free, system_wq);
}

static int cached_dev_init(struct cached_dev *dc, unsigned int block_size)
{
	int ret;
	struct io *io;
	struct request_queue *q = bdev_get_queue(dc->bdev);

	__module_get(THIS_MODULE);
	INIT_LIST_HEAD(&dc->list);
	closure_init(&dc->disk.cl, NULL);
	set_closure_fn(&dc->disk.cl, cached_dev_flush, system_wq);
	kobject_init(&dc->disk.kobj, &bch_cached_dev_ktype);
	INIT_WORK(&dc->detach, cached_dev_detach_finish);
	sema_init(&dc->sb_write_mutex, 1);
	INIT_LIST_HEAD(&dc->io_lru);
	spin_lock_init(&dc->io_lock);
	bch_cache_accounting_init(&dc->accounting, &dc->disk.cl);

	dc->sequential_cutoff = 4 << 20;

	for (io = dc->io; io < dc->io + RECENT_IO; io++) {
		list_add(&io->lru, &dc->io_lru);
		hlist_add_head(&io->hash, dc->io_hash + RECENT_IO);
	}

	dc->disk.stripe_size = q->limits.io_opt >> 9;

	if (dc->disk.stripe_size)
		dc->partial_stripes_expensive =
			q->limits.raid_partial_stripes_expensive;

	ret = bcache_device_init(&dc->disk, block_size,
			 dc->bdev->bd_part->nr_sects - dc->sb.data_offset);
	if (ret)
		return ret;

	dc->disk.disk->queue->backing_dev_info->ra_pages =
		max(dc->disk.disk->queue->backing_dev_info->ra_pages,
		    q->backing_dev_info->ra_pages);

	atomic_set(&dc->io_errors, 0);
	dc->io_disable = false;
	dc->error_limit = DEFAULT_CACHED_DEV_ERROR_LIMIT;
	/* default to auto */
	dc->stop_when_cache_set_failed = BCH_CACHED_DEV_STOP_AUTO;

	bch_cached_dev_request_init(dc);
	bch_cached_dev_writeback_init(dc);
	return 0;
}

/* Cached device - bcache superblock */
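/*
 * Register a backing device whose superblock has already been read and
 * validated. Takes its own reference on @sb_page, initializes the
 * cached_dev, then tries to attach it to every registered cache set; if
 * none matches, it stays on uncached_devices until its set shows up.
 */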
static void register_bdev(struct cache_sb *sb, struct page *sb_page,
			  struct block_device *bdev,
			  struct cached_dev *dc)
{
	const char *err = "cannot allocate memory";
	struct cache_set *c;

	bdevname(bdev, dc->backing_dev_name);
	memcpy(&dc->sb, sb, sizeof(struct cache_sb));
	dc->bdev = bdev;
	dc->bdev->bd_holder = dc;

	bio_init(&dc->sb_bio, dc->sb_bio.bi_inline_vecs, 1);
	bio_first_bvec_all(&dc->sb_bio)->bv_page = sb_page;
	get_page(sb_page);

	if (cached_dev_init(dc, sb->block_size << 9))
		goto err;

	err = "error creating kobject";
	if (kobject_add(&dc->disk.kobj, &part_to_dev(bdev->bd_part)->kobj,
			"bcache"))
		goto err;
	if (bch_cache_accounting_add_kobjs(&dc->accounting, &dc->disk.kobj))
		goto err;

	pr_info("registered backing device %s", dc->backing_dev_name);

	list_add(&dc->list, &uncached_devices);
	/* attach to a matched cache set if it exists */
	list_for_each_entry(c, &bch_cache_sets, list)
		bch_cached_dev_attach(dc, c, NULL);

	if (BDEV_STATE(&dc->sb) == BDEV_STATE_NONE ||
	    BDEV_STATE(&dc->sb) == BDEV_STATE_STALE)
		bch_cached_dev_run(dc);

	return;
err:
	pr_notice("error %s: %s", dc->backing_dev_name, err);
	bcache_device_stop(&dc->disk);
}

/* Flash only volumes */
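/*
 * Flash-only volumes are bcache devices carved entirely out of a cache set,
 * with no backing device: their data lives only in the cache's buckets.
 * Each one is identified by a uuid_entry with the FLASH_ONLY flag set.
 */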
1329 | void bch_flash_dev_release(struct kobject *kobj) | |
1330 | { | |
1331 | struct bcache_device *d = container_of(kobj, struct bcache_device, | |
1332 | kobj); | |
1333 | kfree(d); | |
1334 | } | |
1335 | ||
1336 | static void flash_dev_free(struct closure *cl) | |
1337 | { | |
1338 | struct bcache_device *d = container_of(cl, struct bcache_device, cl); | |
1fae7cf0 | 1339 | |
e5112201 | 1340 | mutex_lock(&bch_register_lock); |
99a27d59 TJ |
1341 | atomic_long_sub(bcache_dev_sectors_dirty(d), |
1342 | &d->c->flash_dev_dirty_sectors); | |
cafe5635 | 1343 | bcache_device_free(d); |
e5112201 | 1344 | mutex_unlock(&bch_register_lock); |
cafe5635 KO |
1345 | kobject_put(&d->kobj); |
1346 | } | |
1347 | ||
1348 | static void flash_dev_flush(struct closure *cl) | |
1349 | { | |
1350 | struct bcache_device *d = container_of(cl, struct bcache_device, cl); | |
1351 | ||
e5112201 | 1352 | mutex_lock(&bch_register_lock); |
ee668506 | 1353 | bcache_device_unlink(d); |
e5112201 | 1354 | mutex_unlock(&bch_register_lock); |
cafe5635 KO |
1355 | kobject_del(&d->kobj); |
1356 | continue_at(cl, flash_dev_free, system_wq); | |
1357 | } | |
1358 | ||
1359 | static int flash_dev_run(struct cache_set *c, struct uuid_entry *u) | |
1360 | { | |
1361 | struct bcache_device *d = kzalloc(sizeof(struct bcache_device), | |
1362 | GFP_KERNEL); | |
1363 | if (!d) | |
1364 | return -ENOMEM; | |
1365 | ||
1366 | closure_init(&d->cl, NULL); | |
1367 | set_closure_fn(&d->cl, flash_dev_flush, system_wq); | |
1368 | ||
1369 | kobject_init(&d->kobj, &bch_flash_dev_ktype); | |
1370 | ||
279afbad | 1371 | if (bcache_device_init(d, block_bytes(c), u->sectors)) |
cafe5635 KO |
1372 | goto err; |
1373 | ||
1374 | bcache_device_attach(d, c, u - c->uuids); | |
175206cf | 1375 | bch_sectors_dirty_init(d); |
cafe5635 KO |
1376 | bch_flash_dev_request_init(d); |
1377 | add_disk(d->disk); | |
1378 | ||
1379 | if (kobject_add(&d->kobj, &disk_to_dev(d->disk)->kobj, "bcache")) | |
1380 | goto err; | |
1381 | ||
1382 | bcache_device_link(d, c, "volume"); | |
1383 | ||
1384 | return 0; | |
1385 | err: | |
1386 | kobject_put(&d->kobj); | |
1387 | return -ENOMEM; | |
1388 | } | |
1389 | ||
1390 | static int flash_devs_run(struct cache_set *c) | |
1391 | { | |
1392 | int ret = 0; | |
1393 | struct uuid_entry *u; | |
1394 | ||
1395 | for (u = c->uuids; | |
02aa8a8b | 1396 | u < c->uuids + c->nr_uuids && !ret; |
cafe5635 KO |
1397 | u++) |
1398 | if (UUID_FLASH_ONLY(u)) | |
1399 | ret = flash_dev_run(c, u); | |
1400 | ||
1401 | return ret; | |
1402 | } | |
1403 | ||
1404 | int bch_flash_dev_create(struct cache_set *c, uint64_t size) | |
1405 | { | |
1406 | struct uuid_entry *u; | |
1407 | ||
1408 | if (test_bit(CACHE_SET_STOPPING, &c->flags)) | |
1409 | return -EINTR; | |
1410 | ||
bf0c55c9 SP |
1411 | if (!test_bit(CACHE_SET_RUNNING, &c->flags)) |
1412 | return -EPERM; | |
1413 | ||
cafe5635 KO |
1414 | u = uuid_find_empty(c); |
1415 | if (!u) { | |
1416 | pr_err("Can't create volume, no room for UUID"); | |
1417 | return -EINVAL; | |
1418 | } | |
1419 | ||
1420 | get_random_bytes(u->uuid, 16); | |
1421 | memset(u->label, 0, 32); | |
75cbb3f1 | 1422 | u->first_reg = u->last_reg = cpu_to_le32((u32)ktime_get_real_seconds()); |
cafe5635 KO |
1423 | |
1424 | SET_UUID_FLASH_ONLY(u, 1); | |
1425 | u->sectors = size >> 9; | |
1426 | ||
1427 | bch_uuid_write(c); | |
1428 | ||
1429 | return flash_dev_run(c, u); | |
1430 | } | |
1431 | ||
c7b7bd07 CL |
1432 | bool bch_cached_dev_error(struct cached_dev *dc) |
1433 | { | |
6147305c CL |
1434 | struct cache_set *c; |
1435 | ||
c7b7bd07 CL |
1436 | if (!dc || test_bit(BCACHE_DEV_CLOSING, &dc->disk.flags)) |
1437 | return false; | |
1438 | ||
1439 | dc->io_disable = true; | |
1440 | /* make others know io_disable is true earlier */ | |
1441 | smp_mb(); | |
1442 | ||
1443 | pr_err("stop %s: too many IO errors on backing device %s\n", | |
6e916a7e | 1444 | dc->disk.disk->disk_name, dc->backing_dev_name); |
c7b7bd07 | 1445 | |
6147305c CL |
1446 | /* |
1447 | * If the cached device is still attached to a cache set, | |
1448 | * even dc->io_disable is true and no more I/O requests | |
1449 | * accepted, cache device internal I/O (writeback scan or | |
1450 | * garbage collection) may still prevent bcache device from | |
1451 | * being stopped. So here CACHE_SET_IO_DISABLE should be | |
1452 | * set to c->flags too, to make the internal I/O to cache | |
1453 | * device rejected and stopped immediately. | |
1454 | * If c is NULL, that means the bcache device is not attached | |
1455 | * to any cache set, then no CACHE_SET_IO_DISABLE bit to set. | |
1456 | */ | |
1457 | c = dc->disk.c; | |
1458 | if (c && test_and_set_bit(CACHE_SET_IO_DISABLE, &c->flags)) | |
1459 | pr_info("CACHE_SET_IO_DISABLE already set"); | |
1460 | ||
c7b7bd07 CL |
1461 | bcache_device_stop(&dc->disk); |
1462 | return true; | |
1463 | } | |
1464 | ||
cafe5635 KO |
1465 | /* Cache set */ |
1466 | ||
1467 | __printf(2, 3) | |
1468 | bool bch_cache_set_error(struct cache_set *c, const char *fmt, ...) | |
1469 | { | |
1470 | va_list args; | |
1471 | ||
77c320eb KO |
1472 | if (c->on_error != ON_ERROR_PANIC && |
1473 | test_bit(CACHE_SET_STOPPING, &c->flags)) | |
cafe5635 KO |
1474 | return false; |
1475 | ||
771f393e | 1476 | if (test_and_set_bit(CACHE_SET_IO_DISABLE, &c->flags)) |
09a44ca2 | 1477 | pr_info("CACHE_SET_IO_DISABLE already set"); |
771f393e | 1478 | |
3be11dba CL |
1479 | /* |
1480 | * XXX: we can be called from atomic context | |
1481 | * acquire_console_sem(); | |
1482 | */ | |
cafe5635 | 1483 | |
6ae63e35 | 1484 | pr_err("bcache: error on %pU: ", c->sb.set_uuid); |
cafe5635 KO |
1485 | |
1486 | va_start(args, fmt); | |
1487 | vprintk(fmt, args); | |
1488 | va_end(args); | |
1489 | ||
6ae63e35 | 1490 | pr_err(", disabling caching\n"); |
cafe5635 | 1491 | |
77c320eb KO |
1492 | if (c->on_error == ON_ERROR_PANIC) |
1493 | panic("panic forced after error\n"); | |
1494 | ||
cafe5635 KO |
1495 | bch_cache_set_unregister(c); |
1496 | return true; | |
1497 | } | |
1498 | ||
1499 | void bch_cache_set_release(struct kobject *kobj) | |
1500 | { | |
1501 | struct cache_set *c = container_of(kobj, struct cache_set, kobj); | |
1fae7cf0 | 1502 | |
cafe5635 KO |
1503 | kfree(c); |
1504 | module_put(THIS_MODULE); | |
1505 | } | |
1506 | ||
1507 | static void cache_set_free(struct closure *cl) | |
1508 | { | |
1509 | struct cache_set *c = container_of(cl, struct cache_set, cl); | |
1510 | struct cache *ca; | |
6f10f7d1 | 1511 | unsigned int i; |
cafe5635 | 1512 | |
ae171023 | 1513 | debugfs_remove(c->debug); |
cafe5635 KO |
1514 | |
1515 | bch_open_buckets_free(c); | |
1516 | bch_btree_cache_free(c); | |
1517 | bch_journal_free(c); | |
1518 | ||
1519 | for_each_cache(ca, c, i) | |
c9a78332 SP |
1520 | if (ca) { |
1521 | ca->set = NULL; | |
1522 | c->cache[ca->sb.nr_this_dev] = NULL; | |
cafe5635 | 1523 | kobject_put(&ca->kobj); |
c9a78332 | 1524 | } |
cafe5635 | 1525 | |
67539e85 | 1526 | bch_bset_sort_state_free(&c->sort); |
cafe5635 | 1527 | free_pages((unsigned long) c->uuids, ilog2(bucket_pages(c))); |
cafe5635 | 1528 | |
da415a09 NS |
1529 | if (c->moving_gc_wq) |
1530 | destroy_workqueue(c->moving_gc_wq); | |
d19936a2 KO |
1531 | bioset_exit(&c->bio_split); |
1532 | mempool_exit(&c->fill_iter); | |
1533 | mempool_exit(&c->bio_meta); | |
1534 | mempool_exit(&c->search); | |
cafe5635 KO |
1535 | kfree(c->devices); |
1536 | ||
1537 | mutex_lock(&bch_register_lock); | |
1538 | list_del(&c->list); | |
1539 | mutex_unlock(&bch_register_lock); | |
1540 | ||
1541 | pr_info("Cache set %pU unregistered", c->sb.set_uuid); | |
1542 | wake_up(&unregister_wait); | |
1543 | ||
1544 | closure_debug_destroy(&c->cl); | |
1545 | kobject_put(&c->kobj); | |
1546 | } | |
1547 | ||
1548 | static void cache_set_flush(struct closure *cl) | |
1549 | { | |
1550 | struct cache_set *c = container_of(cl, struct cache_set, caching); | |
79826c35 | 1551 | struct cache *ca; |
cafe5635 | 1552 | struct btree *b; |
6f10f7d1 | 1553 | unsigned int i; |
cafe5635 KO |
1554 | |
1555 | bch_cache_accounting_destroy(&c->accounting); | |
1556 | ||
1557 | kobject_put(&c->internal); | |
1558 | kobject_del(&c->kobj); | |
1559 | ||
72a44517 KO |
1560 | if (c->gc_thread) |
1561 | kthread_stop(c->gc_thread); | |
1562 | ||
cafe5635 KO |
1563 | if (!IS_ERR_OR_NULL(c->root)) |
1564 | list_add(&c->root->list, &c->btree_cache); | |
1565 | ||
1566 | /* Should skip this if we're unregistering because of an error */ | |
2a285686 KO |
1567 | list_for_each_entry(b, &c->btree_cache, list) { |
1568 | mutex_lock(&b->write_lock); | |
cafe5635 | 1569 | if (btree_node_dirty(b)) |
2a285686 KO |
1570 | __bch_btree_node_write(b, NULL); |
1571 | mutex_unlock(&b->write_lock); | |
1572 | } | |
cafe5635 | 1573 | |
79826c35 KO |
1574 | for_each_cache(ca, c, i) |
1575 | if (ca->alloc_thread) | |
1576 | kthread_stop(ca->alloc_thread); | |
1577 | ||
5b1016e6 KO |
1578 | if (c->journal.cur) { |
1579 | cancel_delayed_work_sync(&c->journal.work); | |
1580 | /* flush last journal entry if needed */ | |
1581 | c->journal.work.work.func(&c->journal.work.work); | |
1582 | } | |
dabb4433 | 1583 | |
cafe5635 KO |
1584 | closure_return(cl); |
1585 | } | |
1586 | ||
7e027ca4 CL |
1587 | /* |
1588 | * This function is only called when CACHE_SET_IO_DISABLE is set, which means | |
1589 | * the cache set is unregistering due to too many I/O errors. Whether the | |
1590 | * bcache device is stopped depends on the stop_when_cache_set_failed setting | |
1591 | * and on whether the broken cache holds dirty data: | |
1592 | * | |
1593 | * dc->stop_when_cache_set_failed   dc->has_dirty   stop bcache device | |
1594 | *  BCH_CACHED_DEV_STOP_AUTO         0               NO | |
1595 | *  BCH_CACHED_DEV_STOP_AUTO         1               YES | |
1596 | *  BCH_CACHED_DEV_STOP_ALWAYS       0               YES | |
1597 | *  BCH_CACHED_DEV_STOP_ALWAYS       1               YES | |
1598 | * | |
1599 | * The expected behavior is: if stop_when_cache_set_failed is configured to | |
1600 | * "auto" via the sysfs interface, the bcache device is not stopped when the | |
1601 | * backing device is clean on the broken cache device. | |
1602 | */ | |
1603 | static void conditional_stop_bcache_device(struct cache_set *c, | |
1604 | struct bcache_device *d, | |
1605 | struct cached_dev *dc) | |
1606 | { | |
1607 | if (dc->stop_when_cache_set_failed == BCH_CACHED_DEV_STOP_ALWAYS) { | |
1608 | pr_warn("stop_when_cache_set_failed of %s is \"always\", stop it for failed cache set %pU.", | |
1609 | d->disk->disk_name, c->sb.set_uuid); | |
1610 | bcache_device_stop(d); | |
1611 | } else if (atomic_read(&dc->has_dirty)) { | |
1612 | /* | |
1613 | * dc->stop_when_cache_set_failed == BCH_CACHED_DEV_STOP_AUTO | |
1614 | * and dc->has_dirty == 1 | |
1615 | */ | |
1616 | pr_warn("stop_when_cache_set_failed of %s is \"auto\" and cache is dirty, stop it to avoid potential data corruption.", | |
1617 | d->disk->disk_name); | |
e8cf978d CIK |
1618 | /* |
1619 | * There might be a small time gap in which the cache set | |
1620 | * is released but the bcache device is not. During this | |
1621 | * gap, regular I/O requests go directly to the backing | |
1622 | * device, since no cache set is attached. In writeback | |
1623 | * mode with a dirty cache, this may leave inconsistent | |
1624 | * data on the backing device. | |
1625 | * Therefore, before calling bcache_device_stop() due | |
1626 | * to a broken cache device, dc->io_disable should be | |
1627 | * explicitly set to true. | |
1628 | */ | |
1629 | dc->io_disable = true; | |
1630 | /* make others know io_disable is true earlier */ | |
1631 | smp_mb(); | |
1632 | bcache_device_stop(d); | |
7e027ca4 CL |
1633 | } else { |
1634 | /* | |
1635 | * dc->stop_when_cache_set_failed == BCH_CACHED_DEV_STOP_AUTO | |
1636 | * and dc->has_dirty == 0 | |
1637 | */ | |
1638 | pr_warn("stop_when_cache_set_failed of %s is \"auto\" and cache is clean, keep it alive.", | |
1639 | d->disk->disk_name); | |
1640 | } | |
1641 | } | |
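/*
 * A hedged usage note, not from this file: stop_when_cache_set_failed
 * is the per-device sysfs knob consulted above. Assuming the usual
 * sysfs layout of a running bcache device, it can be switched from
 * "auto" to "always" with something like:
 *
 *   echo always > /sys/block/bcache0/bcache/stop_when_cache_set_failed
 */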
1642 | ||
cafe5635 KO |
1643 | static void __cache_set_unregister(struct closure *cl) |
1644 | { | |
1645 | struct cache_set *c = container_of(cl, struct cache_set, caching); | |
5caa52af | 1646 | struct cached_dev *dc; |
7e027ca4 | 1647 | struct bcache_device *d; |
cafe5635 KO |
1648 | size_t i; |
1649 | ||
1650 | mutex_lock(&bch_register_lock); | |
1651 | ||
7e027ca4 CL |
1652 | for (i = 0; i < c->devices_max_used; i++) { |
1653 | d = c->devices[i]; | |
1654 | if (!d) | |
1655 | continue; | |
1656 | ||
1657 | if (!UUID_FLASH_ONLY(&c->uuids[i]) && | |
1658 | test_bit(CACHE_SET_UNREGISTERING, &c->flags)) { | |
1659 | dc = container_of(d, struct cached_dev, disk); | |
1660 | bch_cached_dev_detach(dc); | |
1661 | if (test_bit(CACHE_SET_IO_DISABLE, &c->flags)) | |
1662 | conditional_stop_bcache_device(c, d, dc); | |
1663 | } else { | |
1664 | bcache_device_stop(d); | |
5caa52af | 1665 | } |
7e027ca4 | 1666 | } |
cafe5635 KO |
1667 | |
1668 | mutex_unlock(&bch_register_lock); | |
1669 | ||
1670 | continue_at(cl, cache_set_flush, system_wq); | |
1671 | } | |
1672 | ||
1673 | void bch_cache_set_stop(struct cache_set *c) | |
1674 | { | |
1675 | if (!test_and_set_bit(CACHE_SET_STOPPING, &c->flags)) | |
1676 | closure_queue(&c->caching); | |
1677 | } | |
1678 | ||
1679 | void bch_cache_set_unregister(struct cache_set *c) | |
1680 | { | |
1681 | set_bit(CACHE_SET_UNREGISTERING, &c->flags); | |
1682 | bch_cache_set_stop(c); | |
1683 | } | |
1684 | ||
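/*
 * Descriptive note: ilog2(bucket_pages(c)) is the page allocation
 * order, so this returns bucket_pages(c) zeroed, physically
 * contiguous pages - enough to hold exactly one bucket.
 */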
1685 | #define alloc_bucket_pages(gfp, c) \ | |
1686 | ((void *) __get_free_pages(__GFP_ZERO|gfp, ilog2(bucket_pages(c)))) | |
1687 | ||
1688 | struct cache_set *bch_cache_set_alloc(struct cache_sb *sb) | |
1689 | { | |
1690 | int iter_size; | |
1691 | struct cache_set *c = kzalloc(sizeof(struct cache_set), GFP_KERNEL); | |
1fae7cf0 | 1692 | |
cafe5635 KO |
1693 | if (!c) |
1694 | return NULL; | |
1695 | ||
1696 | __module_get(THIS_MODULE); | |
1697 | closure_init(&c->cl, NULL); | |
1698 | set_closure_fn(&c->cl, cache_set_free, system_wq); | |
1699 | ||
1700 | closure_init(&c->caching, &c->cl); | |
1701 | set_closure_fn(&c->caching, __cache_set_unregister, system_wq); | |
1702 | ||
1703 | /* Maybe create continue_at_noreturn() and use it here? */ | |
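/*
 * Teardown chain, for orientation: bch_cache_set_stop() queues
 * &c->caching, which runs __cache_set_unregister(), then
 * cache_set_flush(); the final closure_put() on &c->cl runs
 * cache_set_free().
 */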
1704 | closure_set_stopped(&c->cl); | |
1705 | closure_put(&c->cl); | |
1706 | ||
1707 | kobject_init(&c->kobj, &bch_cache_set_ktype); | |
1708 | kobject_init(&c->internal, &bch_cache_set_internal_ktype); | |
1709 | ||
1710 | bch_cache_accounting_init(&c->accounting, &c->cl); | |
1711 | ||
1712 | memcpy(c->sb.set_uuid, sb->set_uuid, 16); | |
1713 | c->sb.block_size = sb->block_size; | |
1714 | c->sb.bucket_size = sb->bucket_size; | |
1715 | c->sb.nr_in_set = sb->nr_in_set; | |
1716 | c->sb.last_mount = sb->last_mount; | |
1717 | c->bucket_bits = ilog2(sb->bucket_size); | |
1718 | c->block_bits = ilog2(sb->block_size); | |
1719 | c->nr_uuids = bucket_bytes(c) / sizeof(struct uuid_entry); | |
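/*
 * The uuid array occupies a single bucket (allocated with
 * alloc_bucket_pages() below), so bucket_bytes() bounds how many
 * devices can be attached to this cache set.
 */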
2831231d | 1720 | c->devices_max_used = 0; |
ea8c5356 | 1721 | atomic_set(&c->attached_dev_nr, 0); |
ee811287 | 1722 | c->btree_pages = bucket_pages(c); |
cafe5635 KO |
1723 | if (c->btree_pages > BTREE_MAX_PAGES) |
1724 | c->btree_pages = max_t(int, c->btree_pages / 4, | |
1725 | BTREE_MAX_PAGES); | |
1726 | ||
cb7a583e | 1727 | sema_init(&c->sb_write_mutex, 1); |
e8e1d468 | 1728 | mutex_init(&c->bucket_lock); |
0a63b66d | 1729 | init_waitqueue_head(&c->btree_cache_wait); |
35fcd848 | 1730 | init_waitqueue_head(&c->bucket_wait); |
be628be0 | 1731 | init_waitqueue_head(&c->gc_wait); |
cb7a583e | 1732 | sema_init(&c->uuid_write_mutex, 1); |
65d22e91 | 1733 | |
65d22e91 KO |
1734 | spin_lock_init(&c->btree_gc_time.lock); |
1735 | spin_lock_init(&c->btree_split_time.lock); | |
1736 | spin_lock_init(&c->btree_read_time.lock); | |
e8e1d468 | 1737 | |
cafe5635 KO |
1738 | bch_moving_init_cache_set(c); |
1739 | ||
1740 | INIT_LIST_HEAD(&c->list); | |
1741 | INIT_LIST_HEAD(&c->cached_devs); | |
1742 | INIT_LIST_HEAD(&c->btree_cache); | |
1743 | INIT_LIST_HEAD(&c->btree_cache_freeable); | |
1744 | INIT_LIST_HEAD(&c->btree_cache_freed); | |
1745 | INIT_LIST_HEAD(&c->data_buckets); | |
1746 | ||
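/*
 * Sizing note: each bset in a btree node starts on a block boundary
 * and a node fits in one bucket, so an iterator needs at most
 * bucket_size / block_size btree_iter_sets, plus one spare.
 */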
cafe5635 KO |
1747 | iter_size = (sb->bucket_size / sb->block_size + 1) * |
1748 | sizeof(struct btree_iter_set); | |
1749 | ||
6396bb22 | 1750 | if (!(c->devices = kcalloc(c->nr_uuids, sizeof(void *), GFP_KERNEL)) || |
d19936a2 KO |
1751 | mempool_init_slab_pool(&c->search, 32, bch_search_cache) || |
1752 | mempool_init_kmalloc_pool(&c->bio_meta, 2, | |
b0d30981 CL |
1753 | sizeof(struct bbio) + sizeof(struct bio_vec) * |
1754 | bucket_pages(c)) || | |
d19936a2 KO |
1755 | mempool_init_kmalloc_pool(&c->fill_iter, 1, iter_size) || |
1756 | bioset_init(&c->bio_split, 4, offsetof(struct bbio, bio), | |
1757 | BIOSET_NEED_BVECS|BIOSET_NEED_RESCUER) || | |
cafe5635 | 1758 | !(c->uuids = alloc_bucket_pages(GFP_KERNEL, c)) || |
81baf90a BS |
1759 | !(c->moving_gc_wq = alloc_workqueue("bcache_gc", |
1760 | WQ_MEM_RECLAIM, 0)) || | |
cafe5635 KO |
1761 | bch_journal_alloc(c) || |
1762 | bch_btree_cache_alloc(c) || | |
67539e85 KO |
1763 | bch_open_buckets_alloc(c) || |
1764 | bch_bset_sort_state_init(&c->sort, ilog2(c->btree_pages))) | |
cafe5635 KO |
1765 | goto err; |
1766 | ||
cafe5635 KO |
1767 | c->congested_read_threshold_us = 2000; |
1768 | c->congested_write_threshold_us = 20000; | |
7ba0d830 | 1769 | c->error_limit = DEFAULT_IO_ERROR_LIMIT; |
771f393e | 1770 | WARN_ON(test_and_clear_bit(CACHE_SET_IO_DISABLE, &c->flags)); |
cafe5635 KO |
1771 | |
1772 | return c; | |
1773 | err: | |
1774 | bch_cache_set_unregister(c); | |
1775 | return NULL; | |
1776 | } | |
1777 | ||
1778 | static void run_cache_set(struct cache_set *c) | |
1779 | { | |
1780 | const char *err = "cannot allocate memory"; | |
1781 | struct cached_dev *dc, *t; | |
1782 | struct cache *ca; | |
c18536a7 | 1783 | struct closure cl; |
6f10f7d1 | 1784 | unsigned int i; |
cafe5635 | 1785 | |
c18536a7 | 1786 | closure_init_stack(&cl); |
cafe5635 KO |
1787 | |
1788 | for_each_cache(ca, c, i) | |
1789 | c->nbuckets += ca->sb.nbuckets; | |
be628be0 | 1790 | set_gc_sectors(c); |
cafe5635 KO |
1791 | |
1792 | if (CACHE_SYNC(&c->sb)) { | |
1793 | LIST_HEAD(journal); | |
1794 | struct bkey *k; | |
1795 | struct jset *j; | |
1796 | ||
1797 | err = "cannot allocate memory for journal"; | |
c18536a7 | 1798 | if (bch_journal_read(c, &journal)) |
cafe5635 KO |
1799 | goto err; |
1800 | ||
1801 | pr_debug("btree_journal_read() done"); | |
1802 | ||
1803 | err = "no journal entries found"; | |
1804 | if (list_empty(&journal)) | |
1805 | goto err; | |
1806 | ||
1807 | j = &list_entry(journal.prev, struct journal_replay, list)->j; | |
1808 | ||
1809 | err = "IO error reading priorities"; | |
1810 | for_each_cache(ca, c, i) | |
1811 | prio_read(ca, j->prio_bucket[ca->sb.nr_this_dev]); | |
1812 | ||
1813 | /* | |
1814 | * If prio_read() fails it'll call cache_set_error and we'll | |
1815 | * tear everything down right away, but if we checked | |
1816 | * sooner we could avoid the journal replay. | |
1817 | */ | |
1818 | ||
1819 | k = &j->btree_root; | |
1820 | ||
1821 | err = "bad btree root"; | |
65d45231 | 1822 | if (__bch_btree_ptr_invalid(c, k)) |
cafe5635 KO |
1823 | goto err; |
1824 | ||
1825 | err = "error reading btree root"; | |
b0d30981 CL |
1826 | c->root = bch_btree_node_get(c, NULL, k, |
1827 | j->btree_level, | |
1828 | true, NULL); | |
cafe5635 KO |
1829 | if (IS_ERR_OR_NULL(c->root)) |
1830 | goto err; | |
1831 | ||
1832 | list_del_init(&c->root->list); | |
1833 | rw_unlock(true, c->root); | |
1834 | ||
c18536a7 | 1835 | err = uuid_read(c, j, &cl); |
cafe5635 KO |
1836 | if (err) |
1837 | goto err; | |
1838 | ||
1839 | err = "error in recovery"; | |
c18536a7 | 1840 | if (bch_btree_check(c)) |
cafe5635 KO |
1841 | goto err; |
1842 | ||
1843 | bch_journal_mark(c, &journal); | |
2531d9ee | 1844 | bch_initial_gc_finish(c); |
cafe5635 KO |
1845 | pr_debug("btree_check() done"); |
1846 | ||
1847 | /* | |
1848 | * bcache_journal_next() can't happen sooner, or | |
1849 | * btree_gc_finish() will give spurious errors about last_gc > | |
1850 | * gc_gen - this is a hack but oh well. | |
1851 | */ | |
1852 | bch_journal_next(&c->journal); | |
1853 | ||
119ba0f8 | 1854 | err = "error starting allocator thread"; |
cafe5635 | 1855 | for_each_cache(ca, c, i) |
119ba0f8 KO |
1856 | if (bch_cache_allocator_start(ca)) |
1857 | goto err; | |
cafe5635 KO |
1858 | |
1859 | /* | |
1860 | * First place it's safe to allocate: btree_check() and | |
1861 | * btree_gc_finish() have to run before we have buckets to | |
1862 | * allocate, and bch_bucket_alloc_set() might cause a journal | |
1863 | * entry to be written so bcache_journal_next() has to be called | |
1864 | * first. | |
1865 | * | |
1866 | * If the uuids were in the old format we have to rewrite them | |
1867 | * before the next journal entry is written: | |
1868 | */ | |
1869 | if (j->version < BCACHE_JSET_VERSION_UUID) | |
1870 | __uuid_write(c); | |
1871 | ||
c18536a7 | 1872 | bch_journal_replay(c, &journal); |
cafe5635 KO |
1873 | } else { |
1874 | pr_notice("invalidating existing data"); | |
cafe5635 KO |
1875 | |
1876 | for_each_cache(ca, c, i) { | |
6f10f7d1 | 1877 | unsigned int j; |
cafe5635 KO |
1878 | |
1879 | ca->sb.keys = clamp_t(int, ca->sb.nbuckets >> 7, | |
1880 | 2, SB_JOURNAL_BUCKETS); | |
1881 | ||
1882 | for (j = 0; j < ca->sb.keys; j++) | |
1883 | ca->sb.d[j] = ca->sb.first_bucket + j; | |
1884 | } | |
1885 | ||
2531d9ee | 1886 | bch_initial_gc_finish(c); |
cafe5635 | 1887 | |
119ba0f8 | 1888 | err = "error starting allocator thread"; |
cafe5635 | 1889 | for_each_cache(ca, c, i) |
119ba0f8 KO |
1890 | if (bch_cache_allocator_start(ca)) |
1891 | goto err; | |
cafe5635 KO |
1892 | |
1893 | mutex_lock(&c->bucket_lock); | |
1894 | for_each_cache(ca, c, i) | |
1895 | bch_prio_write(ca); | |
1896 | mutex_unlock(&c->bucket_lock); | |
1897 | ||
cafe5635 KO |
1898 | err = "cannot allocate new UUID bucket"; |
1899 | if (__uuid_write(c)) | |
72a44517 | 1900 | goto err; |
cafe5635 KO |
1901 | |
1902 | err = "cannot allocate new btree root"; | |
2452cc89 | 1903 | c->root = __bch_btree_node_alloc(c, NULL, 0, true, NULL); |
cafe5635 | 1904 | if (IS_ERR_OR_NULL(c->root)) |
72a44517 | 1905 | goto err; |
cafe5635 | 1906 | |
2a285686 | 1907 | mutex_lock(&c->root->write_lock); |
cafe5635 | 1908 | bkey_copy_key(&c->root->key, &MAX_KEY); |
c18536a7 | 1909 | bch_btree_node_write(c->root, &cl); |
2a285686 | 1910 | mutex_unlock(&c->root->write_lock); |
cafe5635 KO |
1911 | |
1912 | bch_btree_set_root(c->root); | |
1913 | rw_unlock(true, c->root); | |
1914 | ||
1915 | /* | |
1916 | * We don't want to write the first journal entry until | |
1917 | * everything is set up - fortunately journal entries won't be | |
1918 | * written until the SET_CACHE_SYNC() here: | |
1919 | */ | |
1920 | SET_CACHE_SYNC(&c->sb, true); | |
1921 | ||
1922 | bch_journal_next(&c->journal); | |
c18536a7 | 1923 | bch_journal_meta(c, &cl); |
cafe5635 KO |
1924 | } |
1925 | ||
72a44517 KO |
1926 | err = "error starting gc thread"; |
1927 | if (bch_gc_thread_start(c)) | |
1928 | goto err; | |
1929 | ||
c18536a7 | 1930 | closure_sync(&cl); |
75cbb3f1 | 1931 | c->sb.last_mount = (u32)ktime_get_real_seconds(); |
cafe5635 KO |
1932 | bcache_write_super(c); |
1933 | ||
1934 | list_for_each_entry_safe(dc, t, &uncached_devices, list) | |
73ac105b | 1935 | bch_cached_dev_attach(dc, c, NULL); |
cafe5635 KO |
1936 | |
1937 | flash_devs_run(c); | |
1938 | ||
bf0c55c9 | 1939 | set_bit(CACHE_SET_RUNNING, &c->flags); |
cafe5635 | 1940 | return; |
cafe5635 | 1941 | err: |
c18536a7 | 1942 | closure_sync(&cl); |
cafe5635 | 1943 | /* XXX: test this, it's broken */ |
c8694948 | 1944 | bch_cache_set_error(c, "%s", err); |
cafe5635 KO |
1945 | } |
1946 | ||
1947 | static bool can_attach_cache(struct cache *ca, struct cache_set *c) | |
1948 | { | |
1949 | return ca->sb.block_size == c->sb.block_size && | |
9eb8ebeb | 1950 | ca->sb.bucket_size == c->sb.bucket_size && |
cafe5635 KO |
1951 | ca->sb.nr_in_set == c->sb.nr_in_set; |
1952 | } | |
1953 | ||
1954 | static const char *register_cache_set(struct cache *ca) | |
1955 | { | |
1956 | char buf[12]; | |
1957 | const char *err = "cannot allocate memory"; | |
1958 | struct cache_set *c; | |
1959 | ||
1960 | list_for_each_entry(c, &bch_cache_sets, list) | |
1961 | if (!memcmp(c->sb.set_uuid, ca->sb.set_uuid, 16)) { | |
1962 | if (c->cache[ca->sb.nr_this_dev]) | |
1963 | return "duplicate cache set member"; | |
1964 | ||
1965 | if (!can_attach_cache(ca, c)) | |
1966 | return "cache sb does not match set"; | |
1967 | ||
1968 | if (!CACHE_SYNC(&ca->sb)) | |
1969 | SET_CACHE_SYNC(&c->sb, false); | |
1970 | ||
1971 | goto found; | |
1972 | } | |
1973 | ||
1974 | c = bch_cache_set_alloc(&ca->sb); | |
1975 | if (!c) | |
1976 | return err; | |
1977 | ||
1978 | err = "error creating kobject"; | |
1979 | if (kobject_add(&c->kobj, bcache_kobj, "%pU", c->sb.set_uuid) || | |
1980 | kobject_add(&c->internal, &c->kobj, "internal")) | |
1981 | goto err; | |
1982 | ||
1983 | if (bch_cache_accounting_add_kobjs(&c->accounting, &c->kobj)) | |
1984 | goto err; | |
1985 | ||
1986 | bch_debug_init_cache_set(c); | |
1987 | ||
1988 | list_add(&c->list, &bch_cache_sets); | |
1989 | found: | |
1990 | sprintf(buf, "cache%i", ca->sb.nr_this_dev); | |
1991 | if (sysfs_create_link(&ca->kobj, &c->kobj, "set") || | |
1992 | sysfs_create_link(&c->kobj, &ca->kobj, buf)) | |
1993 | goto err; | |
1994 | ||
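/*
 * The member with the newest superblock sequence number carries the
 * authoritative copy of the set-wide fields.
 */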
1995 | if (ca->sb.seq > c->sb.seq) { | |
1996 | c->sb.version = ca->sb.version; | |
1997 | memcpy(c->sb.set_uuid, ca->sb.set_uuid, 16); | |
1998 | c->sb.flags = ca->sb.flags; | |
1999 | c->sb.seq = ca->sb.seq; | |
2000 | pr_debug("set version = %llu", c->sb.version); | |
2001 | } | |
2002 | ||
d83353b3 | 2003 | kobject_get(&ca->kobj); |
cafe5635 KO |
2004 | ca->set = c; |
2005 | ca->set->cache[ca->sb.nr_this_dev] = ca; | |
2006 | c->cache_by_alloc[c->caches_loaded++] = ca; | |
2007 | ||
2008 | if (c->caches_loaded == c->sb.nr_in_set) | |
2009 | run_cache_set(c); | |
2010 | ||
2011 | return NULL; | |
2012 | err: | |
2013 | bch_cache_set_unregister(c); | |
2014 | return err; | |
2015 | } | |
2016 | ||
2017 | /* Cache device */ | |
2018 | ||
2019 | void bch_cache_release(struct kobject *kobj) | |
2020 | { | |
2021 | struct cache *ca = container_of(kobj, struct cache, kobj); | |
6f10f7d1 | 2022 | unsigned int i; |
cafe5635 | 2023 | |
c9a78332 SP |
2024 | if (ca->set) { |
2025 | BUG_ON(ca->set->cache[ca->sb.nr_this_dev] != ca); | |
cafe5635 | 2026 | ca->set->cache[ca->sb.nr_this_dev] = NULL; |
c9a78332 | 2027 | } |
cafe5635 | 2028 | |
cafe5635 KO |
2029 | free_pages((unsigned long) ca->disk_buckets, ilog2(bucket_pages(ca))); |
2030 | kfree(ca->prio_buckets); | |
2031 | vfree(ca->buckets); | |
2032 | ||
2033 | free_heap(&ca->heap); | |
cafe5635 | 2034 | free_fifo(&ca->free_inc); |
78365411 KO |
2035 | |
2036 | for (i = 0; i < RESERVE_NR; i++) | |
2037 | free_fifo(&ca->free[i]); | |
cafe5635 KO |
2038 | |
2039 | if (ca->sb_bio.bi_inline_vecs[0].bv_page) | |
263663cd | 2040 | put_page(bio_first_page_all(&ca->sb_bio)); |
cafe5635 | 2041 | |
0781c874 | 2042 | if (!IS_ERR_OR_NULL(ca->bdev)) |
cafe5635 | 2043 | blkdev_put(ca->bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL); |
cafe5635 KO |
2044 | |
2045 | kfree(ca); | |
2046 | module_put(THIS_MODULE); | |
2047 | } | |
2048 | ||
c50d4d5d | 2049 | static int cache_alloc(struct cache *ca) |
cafe5635 KO |
2050 | { |
2051 | size_t free; | |
682811b3 | 2052 | size_t btree_buckets; |
cafe5635 | 2053 | struct bucket *b; |
f6027bca DC |
2054 | int ret = -ENOMEM; |
2055 | const char *err = NULL; | |
cafe5635 | 2056 | |
cafe5635 KO |
2057 | __module_get(THIS_MODULE); |
2058 | kobject_init(&ca->kobj, &bch_cache_ktype); | |
2059 | ||
3a83f467 | 2060 | bio_init(&ca->journal.bio, ca->journal.bio.bi_inline_vecs, 8); |
cafe5635 | 2061 | |
682811b3 TJ |
2062 | /* |
2063 | * When ca->sb.njournal_buckets is not zero, a journal exists, | |
2064 | * and btree nodes may split during bch_journal_replay(), | |
2065 | * so buckets of type RESERVE_BTREE are needed. | |
2066 | * In the worst case, every journal bucket holds valid | |
2067 | * journal entries and all of the keys need to be replayed, | |
2068 | * so there should be as many RESERVE_BTREE buckets as | |
2069 | * there are journal buckets. | |
2070 | */ | |
2071 | btree_buckets = ca->sb.njournal_buckets ?: 8; | |
78365411 | 2072 | free = roundup_pow_of_two(ca->sb.nbuckets) >> 10; |
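/*
 * "free" is roughly nbuckets/1024 (power-of-two rounded); it sizes
 * the moving-gc and general-purpose free lists below, with free_inc
 * and the heap scaled up from it.
 */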
3a646fd7 DC |
2073 | if (!free) { |
2074 | ret = -EPERM; | |
2075 | err = "ca->sb.nbuckets is too small"; | |
2076 | goto err_free; | |
2077 | } | |
cafe5635 | 2078 | |
f6027bca DC |
2079 | if (!init_fifo(&ca->free[RESERVE_BTREE], btree_buckets, |
2080 | GFP_KERNEL)) { | |
2081 | err = "ca->free[RESERVE_BTREE] alloc failed"; | |
2082 | goto err_btree_alloc; | |
2083 | } | |
2084 | ||
2085 | if (!init_fifo_exact(&ca->free[RESERVE_PRIO], prio_buckets(ca), | |
2086 | GFP_KERNEL)) { | |
2087 | err = "ca->free[RESERVE_PRIO] alloc failed"; | |
2088 | goto err_prio_alloc; | |
2089 | } | |
2090 | ||
2091 | if (!init_fifo(&ca->free[RESERVE_MOVINGGC], free, GFP_KERNEL)) { | |
2092 | err = "ca->free[RESERVE_MOVINGGC] alloc failed"; | |
2093 | goto err_movinggc_alloc; | |
2094 | } | |
2095 | ||
2096 | if (!init_fifo(&ca->free[RESERVE_NONE], free, GFP_KERNEL)) { | |
2097 | err = "ca->free[RESERVE_NONE] alloc failed"; | |
2098 | goto err_none_alloc; | |
2099 | } | |
2100 | ||
2101 | if (!init_fifo(&ca->free_inc, free << 2, GFP_KERNEL)) { | |
2102 | err = "ca->free_inc alloc failed"; | |
2103 | goto err_free_inc_alloc; | |
2104 | } | |
2105 | ||
2106 | if (!init_heap(&ca->heap, free << 3, GFP_KERNEL)) { | |
2107 | err = "ca->heap alloc failed"; | |
2108 | goto err_heap_alloc; | |
2109 | } | |
2110 | ||
2111 | ca->buckets = vzalloc(array_size(sizeof(struct bucket), | |
2112 | ca->sb.nbuckets)); | |
2113 | if (!ca->buckets) { | |
2114 | err = "ca->buckets alloc failed"; | |
2115 | goto err_buckets_alloc; | |
2116 | } | |
2117 | ||
2118 | ca->prio_buckets = kzalloc(array3_size(sizeof(uint64_t), | |
2119 | prio_buckets(ca), 2), | |
2120 | GFP_KERNEL); | |
2121 | if (!ca->prio_buckets) { | |
2122 | err = "ca->prio_buckets alloc failed"; | |
2123 | goto err_prio_buckets_alloc; | |
2124 | } | |
2125 | ||
2126 | ca->disk_buckets = alloc_bucket_pages(GFP_KERNEL, ca); | |
2127 | if (!ca->disk_buckets) { | |
2128 | err = "ca->disk_buckets alloc failed"; | |
2129 | goto err_disk_buckets_alloc; | |
2130 | } | |
cafe5635 KO |
2131 | |
2132 | ca->prio_last_buckets = ca->prio_buckets + prio_buckets(ca); | |
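/*
 * prio_buckets was allocated with 2 * prio_buckets(ca) entries above:
 * the first half tracks the buckets currently holding prios, the
 * second half (prio_last_buckets) the previously written set.
 */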
2133 | ||
cafe5635 KO |
2134 | for_each_bucket(b, ca) |
2135 | atomic_set(&b->pin, 0); | |
cafe5635 | 2136 | return 0; |
f6027bca DC |
2137 | |
2138 | err_disk_buckets_alloc: | |
2139 | kfree(ca->prio_buckets); | |
2140 | err_prio_buckets_alloc: | |
2141 | vfree(ca->buckets); | |
2142 | err_buckets_alloc: | |
2143 | free_heap(&ca->heap); | |
2144 | err_heap_alloc: | |
2145 | free_fifo(&ca->free_inc); | |
2146 | err_free_inc_alloc: | |
2147 | free_fifo(&ca->free[RESERVE_NONE]); | |
2148 | err_none_alloc: | |
2149 | free_fifo(&ca->free[RESERVE_MOVINGGC]); | |
2150 | err_movinggc_alloc: | |
2151 | free_fifo(&ca->free[RESERVE_PRIO]); | |
2152 | err_prio_alloc: | |
2153 | free_fifo(&ca->free[RESERVE_BTREE]); | |
2154 | err_btree_alloc: | |
3a646fd7 | 2155 | err_free: |
f6027bca DC |
2156 | module_put(THIS_MODULE); |
2157 | if (err) | |
2158 | pr_notice("error %s: %s", ca->cache_dev_name, err); | |
2159 | return ret; | |
cafe5635 KO |
2160 | } |
2161 | ||
9b299728 | 2162 | static int register_cache(struct cache_sb *sb, struct page *sb_page, |
c9a78332 | 2163 | struct block_device *bdev, struct cache *ca) |
cafe5635 | 2164 | { |
d9dc1702 | 2165 | const char *err = NULL; /* must be set for any error case */ |
9b299728 | 2166 | int ret = 0; |
cafe5635 | 2167 | |
6e916a7e | 2168 | bdevname(bdev, ca->cache_dev_name); |
f59fce84 | 2169 | memcpy(&ca->sb, sb, sizeof(struct cache_sb)); |
cafe5635 KO |
2170 | ca->bdev = bdev; |
2171 | ca->bdev->bd_holder = ca; | |
2172 | ||
3a83f467 | 2173 | bio_init(&ca->sb_bio, ca->sb_bio.bi_inline_vecs, 1); |
263663cd | 2174 | bio_first_bvec_all(&ca->sb_bio)->bv_page = sb_page; |
f59fce84 KO |
2175 | get_page(sb_page); |
2176 | ||
cc40daf9 | 2177 | if (blk_queue_discard(bdev_get_queue(bdev))) |
cafe5635 KO |
2178 | ca->discard = CACHE_DISCARD(&ca->sb); |
2179 | ||
c50d4d5d | 2180 | ret = cache_alloc(ca); |
d9dc1702 | 2181 | if (ret != 0) { |
cc40daf9 | 2182 | blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL); |
d9dc1702 EW |
2183 | if (ret == -ENOMEM) |
2184 | err = "cache_alloc(): -ENOMEM"; | |
3a646fd7 DC |
2185 | else if (ret == -EPERM) |
2186 | err = "cache_alloc(): cache device is too small"; | |
d9dc1702 EW |
2187 | else |
2188 | err = "cache_alloc(): unknown error"; | |
f59fce84 | 2189 | goto err; |
d9dc1702 | 2190 | } |
f59fce84 | 2191 | |
b0d30981 CL |
2192 | if (kobject_add(&ca->kobj, |
2193 | &part_to_dev(bdev->bd_part)->kobj, | |
2194 | "bcache")) { | |
9b299728 EW |
2195 | err = "error calling kobject_add"; |
2196 | ret = -ENOMEM; | |
2197 | goto out; | |
2198 | } | |
cafe5635 | 2199 | |
4fa03402 | 2200 | mutex_lock(&bch_register_lock); |
cafe5635 | 2201 | err = register_cache_set(ca); |
4fa03402 KO |
2202 | mutex_unlock(&bch_register_lock); |
2203 | ||
9b299728 EW |
2204 | if (err) { |
2205 | ret = -ENODEV; | |
2206 | goto out; | |
2207 | } | |
cafe5635 | 2208 | |
6e916a7e | 2209 | pr_info("registered cache device %s", ca->cache_dev_name); |
9b299728 | 2210 | |
d83353b3 KO |
2211 | out: |
2212 | kobject_put(&ca->kobj); | |
9b299728 | 2213 | |
cafe5635 | 2214 | err: |
9b299728 | 2215 | if (err) |
6e916a7e | 2216 | pr_notice("error %s: %s", ca->cache_dev_name, err); |
9b299728 EW |
2217 | |
2218 | return ret; | |
cafe5635 KO |
2219 | } |
2220 | ||
2221 | /* Global interfaces/init */ | |
2222 | ||
fc2d5988 CL |
2223 | static ssize_t register_bcache(struct kobject *k, struct kobj_attribute *attr, |
2224 | const char *buffer, size_t size); | |
cafe5635 KO |
2225 | |
2226 | kobj_attribute_write(register, register_bcache); | |
2227 | kobj_attribute_write(register_quiet, register_bcache); | |
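/*
 * Hedged usage example: devices are registered from userspace by
 * writing a device path to these attributes, typically:
 *
 *   echo /dev/sdc > /sys/fs/bcache/register
 *
 * register_quiet is identical except that it stays silent when the
 * device is busy or already registered (see register_bcache() below).
 */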
2228 | ||
b3cf37bf CL |
2229 | static bool bch_is_open_backing(struct block_device *bdev) |
2230 | { | |
a9dd53ad GP |
2231 | struct cache_set *c, *tc; |
2232 | struct cached_dev *dc, *t; | |
2233 | ||
2234 | list_for_each_entry_safe(c, tc, &bch_cache_sets, list) | |
2235 | list_for_each_entry_safe(dc, t, &c->cached_devs, list) | |
2236 | if (dc->bdev == bdev) | |
2237 | return true; | |
2238 | list_for_each_entry_safe(dc, t, &uncached_devices, list) | |
2239 | if (dc->bdev == bdev) | |
2240 | return true; | |
2241 | return false; | |
2242 | } | |
2243 | ||
b3cf37bf CL |
2244 | static bool bch_is_open_cache(struct block_device *bdev) |
2245 | { | |
a9dd53ad GP |
2246 | struct cache_set *c, *tc; |
2247 | struct cache *ca; | |
6f10f7d1 | 2248 | unsigned int i; |
a9dd53ad GP |
2249 | |
2250 | list_for_each_entry_safe(c, tc, &bch_cache_sets, list) | |
2251 | for_each_cache(ca, c, i) | |
2252 | if (ca->bdev == bdev) | |
2253 | return true; | |
2254 | return false; | |
2255 | } | |
2256 | ||
b3cf37bf CL |
2257 | static bool bch_is_open(struct block_device *bdev) |
2258 | { | |
a9dd53ad GP |
2259 | return bch_is_open_cache(bdev) || bch_is_open_backing(bdev); |
2260 | } | |
2261 | ||
cafe5635 KO |
2262 | static ssize_t register_bcache(struct kobject *k, struct kobj_attribute *attr, |
2263 | const char *buffer, size_t size) | |
2264 | { | |
2265 | ssize_t ret = size; | |
2266 | const char *err = "cannot allocate memory"; | |
2267 | char *path = NULL; | |
2268 | struct cache_sb *sb = NULL; | |
2269 | struct block_device *bdev = NULL; | |
2270 | struct page *sb_page = NULL; | |
2271 | ||
2272 | if (!try_module_get(THIS_MODULE)) | |
2273 | return -EBUSY; | |
2274 | ||
a56489d4 FS |
2275 | path = kstrndup(buffer, size, GFP_KERNEL); |
2276 | if (!path) | |
2277 | goto err; | |
2278 | ||
2279 | sb = kmalloc(sizeof(struct cache_sb), GFP_KERNEL); | |
2280 | if (!sb) | |
cafe5635 KO |
2281 | goto err; |
2282 | ||
2283 | err = "failed to open device"; | |
2284 | bdev = blkdev_get_by_path(strim(path), | |
2285 | FMODE_READ|FMODE_WRITE|FMODE_EXCL, | |
2286 | sb); | |
f59fce84 | 2287 | if (IS_ERR(bdev)) { |
a9dd53ad GP |
2288 | if (bdev == ERR_PTR(-EBUSY)) { |
2289 | bdev = lookup_bdev(strim(path)); | |
789d21db | 2290 | mutex_lock(&bch_register_lock); |
a9dd53ad GP |
2291 | if (!IS_ERR(bdev) && bch_is_open(bdev)) |
2292 | err = "device already registered"; | |
2293 | else | |
2294 | err = "device busy"; | |
789d21db | 2295 | mutex_unlock(&bch_register_lock); |
4b758df2 JK |
2296 | if (!IS_ERR(bdev)) |
2297 | bdput(bdev); | |
d7076f21 GP |
2298 | if (attr == &ksysfs_register_quiet) |
2299 | goto out; | |
a9dd53ad | 2300 | } |
cafe5635 | 2301 | goto err; |
f59fce84 KO |
2302 | } |
2303 | ||
2304 | err = "failed to set blocksize"; | |
2305 | if (set_blocksize(bdev, 4096)) | |
2306 | goto err_close; | |
cafe5635 KO |
2307 | |
2308 | err = read_super(sb, bdev, &sb_page); | |
2309 | if (err) | |
2310 | goto err_close; | |
2311 | ||
cc40daf9 | 2312 | err = "failed to register device"; |
2903381f | 2313 | if (SB_IS_BDEV(sb)) { |
cafe5635 | 2314 | struct cached_dev *dc = kzalloc(sizeof(*dc), GFP_KERNEL); |
1fae7cf0 | 2315 | |
f59fce84 KO |
2316 | if (!dc) |
2317 | goto err_close; | |
cafe5635 | 2318 | |
4fa03402 | 2319 | mutex_lock(&bch_register_lock); |
f59fce84 | 2320 | register_bdev(sb, sb_page, bdev, dc); |
4fa03402 | 2321 | mutex_unlock(&bch_register_lock); |
cafe5635 KO |
2322 | } else { |
2323 | struct cache *ca = kzalloc(sizeof(*ca), GFP_KERNEL); | |
1fae7cf0 | 2324 | |
f59fce84 KO |
2325 | if (!ca) |
2326 | goto err_close; | |
cafe5635 | 2327 | |
9b299728 | 2328 | if (register_cache(sb, sb_page, bdev, ca) != 0) |
cc40daf9 | 2329 | goto err; |
cafe5635 | 2330 | } |
f59fce84 KO |
2331 | out: |
2332 | if (sb_page) | |
cafe5635 | 2333 | put_page(sb_page); |
cafe5635 KO |
2334 | kfree(sb); |
2335 | kfree(path); | |
cafe5635 KO |
2336 | module_put(THIS_MODULE); |
2337 | return ret; | |
f59fce84 KO |
2338 | |
2339 | err_close: | |
2340 | blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL); | |
2341 | err: | |
cc40daf9 | 2342 | pr_info("error %s: %s", path, err); |
f59fce84 KO |
2343 | ret = -EINVAL; |
2344 | goto out; | |
cafe5635 KO |
2345 | } |
2346 | ||
2347 | static int bcache_reboot(struct notifier_block *n, unsigned long code, void *x) | |
2348 | { | |
2349 | if (code == SYS_DOWN || | |
2350 | code == SYS_HALT || | |
2351 | code == SYS_POWER_OFF) { | |
2352 | DEFINE_WAIT(wait); | |
2353 | unsigned long start = jiffies; | |
2354 | bool stopped = false; | |
2355 | ||
2356 | struct cache_set *c, *tc; | |
2357 | struct cached_dev *dc, *tdc; | |
2358 | ||
2359 | mutex_lock(&bch_register_lock); | |
2360 | ||
2361 | if (list_empty(&bch_cache_sets) && | |
2362 | list_empty(&uncached_devices)) | |
2363 | goto out; | |
2364 | ||
2365 | pr_info("Stopping all devices:"); | |
2366 | ||
2367 | list_for_each_entry_safe(c, tc, &bch_cache_sets, list) | |
2368 | bch_cache_set_stop(c); | |
2369 | ||
2370 | list_for_each_entry_safe(dc, tdc, &uncached_devices, list) | |
2371 | bcache_device_stop(&dc->disk); | |
2372 | ||
2373 | /* What's a condition variable? */ | |
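/*
 * Poll until every cache set and uncached device is gone or ~2
 * seconds have elapsed; unregister_wait is woken as each cache set
 * unregisters.
 */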
2374 | while (1) { | |
2375 | long timeout = start + 2 * HZ - jiffies; | |
2376 | ||
2377 | stopped = list_empty(&bch_cache_sets) && | |
2378 | list_empty(&uncached_devices); | |
2379 | ||
2380 | if (timeout < 0 || stopped) | |
2381 | break; | |
2382 | ||
2383 | prepare_to_wait(&unregister_wait, &wait, | |
2384 | TASK_UNINTERRUPTIBLE); | |
2385 | ||
2386 | mutex_unlock(&bch_register_lock); | |
2387 | schedule_timeout(timeout); | |
2388 | mutex_lock(&bch_register_lock); | |
2389 | } | |
2390 | ||
2391 | finish_wait(&unregister_wait, &wait); | |
2392 | ||
2393 | if (stopped) | |
2394 | pr_info("All devices stopped"); | |
2395 | else | |
2396 | pr_notice("Timeout waiting for devices to be closed"); | |
2397 | out: | |
2398 | mutex_unlock(&bch_register_lock); | |
2399 | } | |
2400 | ||
2401 | return NOTIFY_DONE; | |
2402 | } | |
2403 | ||
2404 | static struct notifier_block reboot = { | |
2405 | .notifier_call = bcache_reboot, | |
2406 | .priority = INT_MAX, /* before any real devices */ | |
2407 | }; | |
2408 | ||
2409 | static void bcache_exit(void) | |
2410 | { | |
2411 | bch_debug_exit(); | |
cafe5635 | 2412 | bch_request_exit(); |
cafe5635 KO |
2413 | if (bcache_kobj) |
2414 | kobject_put(bcache_kobj); | |
2415 | if (bcache_wq) | |
2416 | destroy_workqueue(bcache_wq); | |
0f843e65 GF |
2417 | if (bch_journal_wq) |
2418 | destroy_workqueue(bch_journal_wq); | |
2419 | ||
5c41c8a7 KO |
2420 | if (bcache_major) |
2421 | unregister_blkdev(bcache_major, "bcache"); | |
cafe5635 | 2422 | unregister_reboot_notifier(&reboot); |
330a4db8 | 2423 | mutex_destroy(&bch_register_lock); |
cafe5635 KO |
2424 | } |
2425 | ||
9aaf5165 CL |
2426 | /* Check and fixup module parameters */ |
2427 | static void check_module_parameters(void) | |
2428 | { | |
2429 | if (bch_cutoff_writeback_sync == 0) | |
2430 | bch_cutoff_writeback_sync = CUTOFF_WRITEBACK_SYNC; | |
2431 | else if (bch_cutoff_writeback_sync > CUTOFF_WRITEBACK_SYNC_MAX) { | |
2432 | pr_warn("set bch_cutoff_writeback_sync (%u) to max value %u", | |
2433 | bch_cutoff_writeback_sync, CUTOFF_WRITEBACK_SYNC_MAX); | |
2434 | bch_cutoff_writeback_sync = CUTOFF_WRITEBACK_SYNC_MAX; | |
2435 | } | |
2436 | ||
2437 | if (bch_cutoff_writeback == 0) | |
2438 | bch_cutoff_writeback = CUTOFF_WRITEBACK; | |
2439 | else if (bch_cutoff_writeback > CUTOFF_WRITEBACK_MAX) { | |
2440 | pr_warn("set bch_cutoff_writeback (%u) to max value %u", | |
2441 | bch_cutoff_writeback, CUTOFF_WRITEBACK_MAX); | |
2442 | bch_cutoff_writeback = CUTOFF_WRITEBACK_MAX; | |
2443 | } | |
2444 | ||
2445 | if (bch_cutoff_writeback > bch_cutoff_writeback_sync) { | |
2446 | pr_warn("set bch_cutoff_writeback (%u) to %u", | |
2447 | bch_cutoff_writeback, bch_cutoff_writeback_sync); | |
2448 | bch_cutoff_writeback = bch_cutoff_writeback_sync; | |
2449 | } | |
2450 | } | |
2451 | ||
cafe5635 KO |
2452 | static int __init bcache_init(void) |
2453 | { | |
2454 | static const struct attribute *files[] = { | |
2455 | &ksysfs_register.attr, | |
2456 | &ksysfs_register_quiet.attr, | |
2457 | NULL | |
2458 | }; | |
2459 | ||
9aaf5165 CL |
2460 | check_module_parameters(); |
2461 | ||
cafe5635 KO |
2462 | mutex_init(&bch_register_lock); |
2463 | init_waitqueue_head(&unregister_wait); | |
2464 | register_reboot_notifier(&reboot); | |
2465 | ||
2466 | bcache_major = register_blkdev(0, "bcache"); | |
2ecf0cdb ZL |
2467 | if (bcache_major < 0) { |
2468 | unregister_reboot_notifier(&reboot); | |
330a4db8 | 2469 | mutex_destroy(&bch_register_lock); |
cafe5635 | 2470 | return bcache_major; |
2ecf0cdb | 2471 | } |
cafe5635 | 2472 | |
16c1fdf4 FS |
2473 | bcache_wq = alloc_workqueue("bcache", WQ_MEM_RECLAIM, 0); |
2474 | if (!bcache_wq) | |
2475 | goto err; | |
2476 | ||
0f843e65 GF |
2477 | bch_journal_wq = alloc_workqueue("bch_journal", WQ_MEM_RECLAIM, 0); |
2478 | if (!bch_journal_wq) | |
2479 | goto err; | |
2480 | ||
16c1fdf4 FS |
2481 | bcache_kobj = kobject_create_and_add("bcache", fs_kobj); |
2482 | if (!bcache_kobj) | |
2483 | goto err; | |
2484 | ||
2485 | if (bch_request_init() || | |
330a4db8 | 2486 | sysfs_create_files(bcache_kobj, files)) |
cafe5635 KO |
2487 | goto err; |
2488 | ||
91bafdf0 | 2489 | bch_debug_init(); |
78ac2107 CL |
2490 | closure_debug_init(); |
2491 | ||
cafe5635 KO |
2492 | return 0; |
2493 | err: | |
2494 | bcache_exit(); | |
2495 | return -ENOMEM; | |
2496 | } | |
2497 | ||
9aaf5165 CL |
2498 | /* |
2499 | * Module hooks | |
2500 | */ | |
cafe5635 KO |
2501 | module_exit(bcache_exit); |
2502 | module_init(bcache_init); | |
009673d0 | 2503 | |
9aaf5165 CL |
2504 | module_param(bch_cutoff_writeback, uint, 0); |
2505 | MODULE_PARM_DESC(bch_cutoff_writeback, "threshold to cutoff writeback"); | |
2506 | ||
2507 | module_param(bch_cutoff_writeback_sync, uint, 0); | |
2508 | MODULE_PARM_DESC(bch_cutoff_writeback_sync, "hard threshold to cutoff writeback"); | |
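/*
 * Hedged load-time example: both parameters default to 0, which
 * check_module_parameters() replaces with the built-in
 * CUTOFF_WRITEBACK* defaults; out-of-range values are clamped rather
 * than rejected. E.g.:
 *
 *   modprobe bcache bch_cutoff_writeback=40 bch_cutoff_writeback_sync=70
 */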
2509 | ||
009673d0 CL |
2510 | MODULE_DESCRIPTION("Bcache: a Linux block layer cache"); |
2511 | MODULE_AUTHOR("Kent Overstreet <kent.overstreet@gmail.com>"); | |
2512 | MODULE_LICENSE("GPL"); |