Commit | Line | Data |
---|---|---|
87418ef9 | 1 | // SPDX-License-Identifier: GPL-2.0 |
cafe5635 KO | 2 | /* |
3 | * bcache setup/teardown code, and some metadata io - read a superblock and | |
4 | * figure out what to do with it. | |
5 | * | |
6 | * Copyright 2010, 2011 Kent Overstreet <kent.overstreet@gmail.com> | |
7 | * Copyright 2012 Google, Inc. | |
8 | */ | |
9 | ||
10 | #include "bcache.h" | |
11 | #include "btree.h" | |
12 | #include "debug.h" | |
65d45231 | 13 | #include "extents.h" |
cafe5635 | 14 | #include "request.h" |
279afbad | 15 | #include "writeback.h" |
d721a43f | 16 | #include "features.h" |
cafe5635 | 17 | |
c37511b8 | 18 | #include <linux/blkdev.h> |
4ee60ec1 | 19 | #include <linux/pagemap.h> |
cafe5635 | 20 | #include <linux/debugfs.h> |
28935ab5 | 21 | #include <linux/idr.h> |
79826c35 | 22 | #include <linux/kthread.h> |
ee4a36f4 | 23 | #include <linux/workqueue.h> |
cafe5635 KO | 24 | #include <linux/module.h> |
25 | #include <linux/random.h> | |
26 | #include <linux/reboot.h> | |
27 | #include <linux/sysfs.h> | |
28 | ||
9aaf5165 CL | 29 | unsigned int bch_cutoff_writeback; |
30 | unsigned int bch_cutoff_writeback_sync; | |
31 | ||
cafe5635 KO | 32 | static const char bcache_magic[] = { |
33 | 0xc6, 0x85, 0x73, 0xf6, 0x4e, 0x1a, 0x45, 0xca, | |
34 | 0x82, 0x65, 0xf5, 0x7f, 0x48, 0xba, 0x6d, 0x81 | |
35 | }; | |
36 | ||
37 | static const char invalid_uuid[] = { | |
38 | 0xa0, 0x3e, 0xf8, 0xed, 0x3e, 0xe1, 0xb8, 0x78, | |
39 | 0xc8, 0x50, 0xfc, 0x5e, 0xcb, 0x16, 0xcd, 0x99 | |
40 | }; | |
41 | ||
cafe5635 KO | 42 | static struct kobject *bcache_kobj; |
43 | struct mutex bch_register_lock; | |
a59ff6cc | 44 | bool bcache_is_reboot; |
cafe5635 KO | 45 | LIST_HEAD(bch_cache_sets); |
46 | static LIST_HEAD(uncached_devices); | |
47 | ||
28935ab5 | 48 | static int bcache_major; |
1dbe32ad | 49 | static DEFINE_IDA(bcache_device_idx); |
cafe5635 KO | 50 | static wait_queue_head_t unregister_wait; |
51 | struct workqueue_struct *bcache_wq; | |
afe78ab4 | 52 | struct workqueue_struct *bch_flush_wq; |
0f843e65 | 53 | struct workqueue_struct *bch_journal_wq; |
cafe5635 | 54 | |
a59ff6cc | 55 | |
cafe5635 | 56 | #define BTREE_MAX_PAGES (256 * 1024 / PAGE_SIZE) |
1dbe32ad CL | 57 | /* limit on the number of partitions on a single bcache device */ |
58 | #define BCACHE_MINORS 128 | |
59 | /* limit on the number of bcache devices in a single system */ |
60 | #define BCACHE_DEVICE_IDX_MAX ((1U << MINORBITS)/BCACHE_MINORS) | |
cafe5635 | 61 | |
cafe5635 KO | 62 | /* Superblock */ |
63 | ||
ffa47032 CL | 64 | static unsigned int get_bucket_size(struct cache_sb *sb, struct cache_sb_disk *s) |
65 | { | |
66 | unsigned int bucket_size = le16_to_cpu(s->bucket_size); | |
67 | ||
b16671e8 CL | 68 | if (sb->version >= BCACHE_SB_VERSION_CDEV_WITH_FEATURES) { |
69 | if (bch_has_feature_large_bucket(sb)) { | |
70 | unsigned int max, order; | |
71 | ||
72 | max = sizeof(unsigned int) * BITS_PER_BYTE - 1; | |
73 | order = le16_to_cpu(s->bucket_size); | |
74 | /* | |
75 | * The bcache tools ensure this overflow won't |
76 | * happen; an error message here is enough. |
77 | */ | |
78 | if (order > max) | |
79 | pr_err("Bucket size (1 << %u) overflows\n", | |
80 | order); | |
81 | bucket_size = 1 << order; | |
82 | } else if (bch_has_feature_obso_large_bucket(sb)) { | |
83 | bucket_size += | |
84 | le16_to_cpu(s->obso_bucket_size_hi) << 16; | |
85 | } | |
86 | } | |
ffa47032 CL | 87 | |
88 | return bucket_size; | |
89 | } | |
90 | ||
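
get_bucket_size() above accepts three encodings of the bucket size: a plain 16-bit sector count, a power-of-two order when the `large_bucket` feature is set, and a 32-bit value split across `bucket_size` and `obso_bucket_size_hi` for the obsoleted layout. A standalone sketch of that decode logic (names with an `_example` suffix are hypothetical; the kernel works on `__le16` fields rather than plain integers):

```c
#include <stdint.h>
#include <stdio.h>

/* Decode a bucket size the way get_bucket_size() does:
 * - plain 16-bit sector count by default,
 * - an order (1 << n) when the large_bucket feature bit is set,
 * - low 16 bits | (obso_bucket_size_hi << 16) for the obsoleted layout. */
static unsigned int decode_bucket_size_example(uint16_t raw, int large_bucket,
					       int obso_large_bucket,
					       uint16_t obso_hi)
{
	if (large_bucket)
		return 1u << raw;	/* raw holds log2(bucket size in sectors) */
	if (obso_large_bucket)
		return (unsigned int)raw | ((unsigned int)obso_hi << 16);
	return raw;			/* classic 16-bit sector count */
}

int main(void)
{
	/* order 13 -> 8192-sector buckets */
	printf("%u\n", decode_bucket_size_example(13, 1, 0, 0));
	return 0;
}
```
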
5b21403c CL | 91 | static const char *read_super_common(struct cache_sb *sb, struct block_device *bdev, |
92 | struct cache_sb_disk *s) | |
93 | { | |
94 | const char *err; | |
95 | unsigned int i; | |
96 | ||
198efa35 | 97 | sb->first_bucket = le16_to_cpu(s->first_bucket); |
5b21403c | 98 | sb->nbuckets = le64_to_cpu(s->nbuckets); |
ffa47032 | 99 | sb->bucket_size = get_bucket_size(sb, s); |
5b21403c CL | 100 | |
101 | sb->nr_in_set = le16_to_cpu(s->nr_in_set); | |
102 | sb->nr_this_dev = le16_to_cpu(s->nr_this_dev); | |
103 | ||
198efa35 CL | 104 | err = "Too many journal buckets"; |
105 | if (sb->keys > SB_JOURNAL_BUCKETS) | |
106 | goto err; | |
107 | ||
5b21403c CL | 108 | err = "Too many buckets"; |
109 | if (sb->nbuckets > LONG_MAX) | |
110 | goto err; | |
111 | ||
112 | err = "Not enough buckets"; | |
113 | if (sb->nbuckets < 1 << 7) | |
114 | goto err; | |
115 | ||
c557a5f7 CL | 116 | err = "Bad block size (not power of 2)"; |
117 | if (!is_power_of_2(sb->block_size)) | |
118 | goto err; | |
119 | ||
120 | err = "Bad block size (larger than page size)"; | |
121 | if (sb->block_size > PAGE_SECTORS) | |
122 | goto err; | |
123 | ||
124 | err = "Bad bucket size (not power of 2)"; | |
125 | if (!is_power_of_2(sb->bucket_size)) | |
126 | goto err; | |
127 | ||
128 | err = "Bad bucket size (smaller than page size)"; | |
129 | if (sb->bucket_size < PAGE_SECTORS) | |
5b21403c CL | 130 | goto err; |
131 | ||
132 | err = "Invalid superblock: device too small"; | |
133 | if (get_capacity(bdev->bd_disk) < | |
134 | sb->bucket_size * sb->nbuckets) | |
135 | goto err; | |
136 | ||
137 | err = "Bad UUID"; | |
138 | if (bch_is_zero(sb->set_uuid, 16)) | |
139 | goto err; | |
140 | ||
141 | err = "Bad cache device number in set"; | |
142 | if (!sb->nr_in_set || | |
143 | sb->nr_in_set <= sb->nr_this_dev || | |
144 | sb->nr_in_set > MAX_CACHES_PER_SET) | |
145 | goto err; | |
146 | ||
147 | err = "Journal buckets not sequential"; | |
148 | for (i = 0; i < sb->keys; i++) | |
149 | if (sb->d[i] != sb->first_bucket + i) | |
150 | goto err; | |
151 | ||
152 | err = "Too many journal buckets"; | |
153 | if (sb->first_bucket + sb->keys > sb->nbuckets) | |
154 | goto err; | |
155 | ||
156 | err = "Invalid superblock: first bucket comes before end of super"; | |
157 | if (sb->first_bucket * sb->bucket_size < 16) | |
158 | goto err; | |
159 | ||
160 | err = NULL; | |
161 | err: | |
162 | return err; | |
163 | } | |
164 | ||
165 | ||
cafe5635 | 166 | static const char *read_super(struct cache_sb *sb, struct block_device *bdev, |
cfa0c56d | 167 | struct cache_sb_disk **res) |
cafe5635 KO | 168 | { |
169 | const char *err; | |
a702a692 | 170 | struct cache_sb_disk *s; |
6321bef0 | 171 | struct page *page; |
6f10f7d1 | 172 | unsigned int i; |
cafe5635 | 173 | |
6321bef0 CH | 174 | page = read_cache_page_gfp(bdev->bd_inode->i_mapping, |
175 | SB_OFFSET >> PAGE_SHIFT, GFP_KERNEL); | |
176 | if (IS_ERR(page)) | |
cafe5635 | 177 | return "IO error"; |
6321bef0 | 178 | s = page_address(page) + offset_in_page(SB_OFFSET); |
cafe5635 KO | 179 | |
180 | sb->offset = le64_to_cpu(s->offset); | |
181 | sb->version = le64_to_cpu(s->version); | |
182 | ||
183 | memcpy(sb->magic, s->magic, 16); | |
184 | memcpy(sb->uuid, s->uuid, 16); | |
185 | memcpy(sb->set_uuid, s->set_uuid, 16); | |
186 | memcpy(sb->label, s->label, SB_LABEL_SIZE); | |
187 | ||
188 | sb->flags = le64_to_cpu(s->flags); | |
189 | sb->seq = le64_to_cpu(s->seq); | |
cafe5635 | 190 | sb->last_mount = le32_to_cpu(s->last_mount); |
cafe5635 KO | 191 | sb->keys = le16_to_cpu(s->keys); |
192 | ||
193 | for (i = 0; i < SB_JOURNAL_BUCKETS; i++) | |
194 | sb->d[i] = le64_to_cpu(s->d[i]); | |
195 | ||
46f5aa88 | 196 | pr_debug("read sb version %llu, flags %llu, seq %llu, journal size %u\n", |
cafe5635 KO | 197 | sb->version, sb->flags, sb->seq, sb->keys); |
198 | ||
aaf8dbea | 199 | err = "Not a bcache superblock (bad offset)"; |
cafe5635 KO | 200 | if (sb->offset != SB_SECTOR) |
201 | goto err; | |
202 | ||
aaf8dbea | 203 | err = "Not a bcache superblock (bad magic)"; |
cafe5635 KO | 204 | if (memcmp(sb->magic, bcache_magic, 16)) |
205 | goto err; | |
206 | ||
cafe5635 KO | 207 | err = "Bad checksum"; |
208 | if (s->csum != csum_set(s)) | |
209 | goto err; | |
210 | ||
211 | err = "Bad UUID"; | |
169ef1cf | 212 | if (bch_is_zero(sb->uuid, 16)) |
cafe5635 KO | 213 | goto err; |
214 | ||
8abb2a5d KO | 215 | sb->block_size = le16_to_cpu(s->block_size); |
216 | ||
217 | err = "Superblock block size smaller than device block size"; | |
218 | if (sb->block_size << 9 < bdev_logical_block_size(bdev)) | |
219 | goto err; | |
220 | ||
2903381f KO | 221 | switch (sb->version) { |
222 | case BCACHE_SB_VERSION_BDEV: | |
2903381f KO | 223 | sb->data_offset = BDEV_DATA_START_DEFAULT; |
224 | break; | |
225 | case BCACHE_SB_VERSION_BDEV_WITH_OFFSET: | |
d721a43f | 226 | case BCACHE_SB_VERSION_BDEV_WITH_FEATURES: |
2903381f KO | 227 | sb->data_offset = le64_to_cpu(s->data_offset); |
228 | ||
229 | err = "Bad data offset"; | |
230 | if (sb->data_offset < BDEV_DATA_START_DEFAULT) | |
231 | goto err; | |
cafe5635 | 232 | |
2903381f KO | 233 | break; |
234 | case BCACHE_SB_VERSION_CDEV: | |
235 | case BCACHE_SB_VERSION_CDEV_WITH_UUID: | |
5b21403c CL | 236 | err = read_super_common(sb, bdev, s); |
237 | if (err) | |
2903381f | 238 | goto err; |
2903381f | 239 | break; |
d721a43f | 240 | case BCACHE_SB_VERSION_CDEV_WITH_FEATURES: |
ffa47032 CL | 241 | /* |
242 | * Feature bits are needed in read_super_common(), | |
243 | * so convert them first. |
244 | */ | |
d721a43f CL | 245 | sb->feature_compat = le64_to_cpu(s->feature_compat); |
246 | sb->feature_incompat = le64_to_cpu(s->feature_incompat); | |
247 | sb->feature_ro_compat = le64_to_cpu(s->feature_ro_compat); | |
1dfc0686 CL | 248 | |
249 | /* Check incompatible features */ | |
250 | err = "Unsupported compatible feature found"; | |
251 | if (bch_has_unknown_compat_features(sb)) | |
252 | goto err; | |
253 | ||
254 | err = "Unsupported read-only compatible feature found"; | |
255 | if (bch_has_unknown_ro_compat_features(sb)) | |
256 | goto err; | |
257 | ||
258 | err = "Unsupported incompatible feature found"; | |
259 | if (bch_has_unknown_incompat_features(sb)) | |
260 | goto err; | |
261 | ||
ffa47032 CL | 262 | err = read_super_common(sb, bdev, s); |
263 | if (err) | |
2903381f | 264 | goto err; |
2903381f KO | 265 | break; |
266 | default: | |
267 | err = "Unsupported superblock version"; | |
cafe5635 | 268 | goto err; |
2903381f KO | 269 | } |
270 | ||
75cbb3f1 | 271 | sb->last_mount = (u32)ktime_get_real_seconds(); |
cfa0c56d | 272 | *res = s; |
6321bef0 | 273 | return NULL; |
cafe5635 | 274 | err: |
6321bef0 | 275 | put_page(page); |
cafe5635 KO | 276 | return err; |
277 | } | |
278 | ||
4246a0b6 | 279 | static void write_bdev_super_endio(struct bio *bio) |
cafe5635 KO | 280 | { |
281 | struct cached_dev *dc = bio->bi_private; | |
08ec1e62 CL | 282 | |
283 | if (bio->bi_status) | |
284 | bch_count_backing_io_errors(dc, bio); | |
cafe5635 | 285 | |
cb7a583e | 286 | closure_put(&dc->sb_write); |
cafe5635 KO | 287 | } |
288 | ||
475389ae CH | 289 | static void __write_super(struct cache_sb *sb, struct cache_sb_disk *out, |
290 | struct bio *bio) | |
cafe5635 | 291 | { |
6f10f7d1 | 292 | unsigned int i; |
cafe5635 | 293 | |
475389ae | 294 | bio->bi_opf = REQ_OP_WRITE | REQ_SYNC | REQ_META; |
4f024f37 | 295 | bio->bi_iter.bi_sector = SB_SECTOR; |
475389ae CH | 296 | __bio_add_page(bio, virt_to_page(out), SB_SIZE, |
297 | offset_in_page(out)); | |
cafe5635 KO | 298 | |
299 | out->offset = cpu_to_le64(sb->offset); | |
cafe5635 KO | 300 | |
301 | memcpy(out->uuid, sb->uuid, 16); | |
302 | memcpy(out->set_uuid, sb->set_uuid, 16); | |
303 | memcpy(out->label, sb->label, SB_LABEL_SIZE); | |
304 | ||
305 | out->flags = cpu_to_le64(sb->flags); | |
306 | out->seq = cpu_to_le64(sb->seq); | |
307 | ||
308 | out->last_mount = cpu_to_le32(sb->last_mount); | |
309 | out->first_bucket = cpu_to_le16(sb->first_bucket); | |
310 | out->keys = cpu_to_le16(sb->keys); | |
311 | ||
312 | for (i = 0; i < sb->keys; i++) | |
313 | out->d[i] = cpu_to_le64(sb->d[i]); | |
314 | ||
d721a43f CL |
315 | if (sb->version >= BCACHE_SB_VERSION_CDEV_WITH_FEATURES) { |
316 | out->feature_compat = cpu_to_le64(sb->feature_compat); | |
317 | out->feature_incompat = cpu_to_le64(sb->feature_incompat); | |
318 | out->feature_ro_compat = cpu_to_le64(sb->feature_ro_compat); | |
319 | } | |
320 | ||
321 | out->version = cpu_to_le64(sb->version); | |
cafe5635 KO |
322 | out->csum = csum_set(out); |
323 | ||
46f5aa88 | 324 | pr_debug("ver %llu, flags %llu, seq %llu\n", |
cafe5635 KO |
325 | sb->version, sb->flags, sb->seq); |
326 | ||
4e49ea4a | 327 | submit_bio(bio); |
cafe5635 KO |
328 | } |
329 | ||
cb7a583e KO |
330 | static void bch_write_bdev_super_unlock(struct closure *cl) |
331 | { | |
332 | struct cached_dev *dc = container_of(cl, struct cached_dev, sb_write); | |
333 | ||
334 | up(&dc->sb_write_mutex); | |
335 | } | |
336 | ||
cafe5635 KO |
337 | void bch_write_bdev_super(struct cached_dev *dc, struct closure *parent) |
338 | { | |
cb7a583e | 339 | struct closure *cl = &dc->sb_write; |
cafe5635 KO |
340 | struct bio *bio = &dc->sb_bio; |
341 | ||
cb7a583e KO |
342 | down(&dc->sb_write_mutex); |
343 | closure_init(cl, parent); | |
cafe5635 | 344 | |
49add496 | 345 | bio_init(bio, dc->bdev, dc->sb_bv, 1, 0); |
cafe5635 KO |
346 | bio->bi_end_io = write_bdev_super_endio; |
347 | bio->bi_private = dc; | |
348 | ||
349 | closure_get(cl); | |
27a40ab9 | 350 | /* I/O request sent to backing device */ |
475389ae | 351 | __write_super(&dc->sb, dc->sb_disk, bio); |
cafe5635 | 352 | |
cb7a583e | 353 | closure_return_with_destructor(cl, bch_write_bdev_super_unlock); |
cafe5635 KO |
354 | } |
355 | ||
4246a0b6 | 356 | static void write_super_endio(struct bio *bio) |
cafe5635 KO |
357 | { |
358 | struct cache *ca = bio->bi_private; | |
359 | ||
5138ac67 CL | 360 | /* is_read = 0 */ |
361 | bch_count_io_errors(ca, bio->bi_status, 0, | |
362 | "writing superblock"); | |
cb7a583e KO |
363 | closure_put(&ca->set->sb_write); |
364 | } | |
365 | ||
366 | static void bcache_write_super_unlock(struct closure *cl) | |
367 | { | |
368 | struct cache_set *c = container_of(cl, struct cache_set, sb_write); | |
369 | ||
370 | up(&c->sb_write_mutex); | |
cafe5635 KO |
371 | } |
372 | ||
373 | void bcache_write_super(struct cache_set *c) | |
374 | { | |
cb7a583e | 375 | struct closure *cl = &c->sb_write; |
08fdb2cd CL |
376 | struct cache *ca = c->cache; |
377 | struct bio *bio = &ca->sb_bio; | |
378 | unsigned int version = BCACHE_SB_VERSION_CDEV_WITH_UUID; | |
cafe5635 | 379 | |
cb7a583e KO |
380 | down(&c->sb_write_mutex); |
381 | closure_init(cl, &c->cl); | |
cafe5635 | 382 | |
4a784266 | 383 | ca->sb.seq++; |
cafe5635 | 384 | |
4a784266 CL |
385 | if (ca->sb.version < version) |
386 | ca->sb.version = version; | |
cafe5635 | 387 | |
49add496 | 388 | bio_init(bio, ca->bdev, ca->sb_bv, 1, 0); |
08fdb2cd CL |
389 | bio->bi_end_io = write_super_endio; |
390 | bio->bi_private = ca; | |
cafe5635 | 391 | |
08fdb2cd CL |
392 | closure_get(cl); |
393 | __write_super(&ca->sb, ca->sb_disk, bio); | |
cafe5635 | 394 | |
cb7a583e | 395 | closure_return_with_destructor(cl, bcache_write_super_unlock); |
cafe5635 KO |
396 | } |
397 | ||
398 | /* UUID io */ | |
399 | ||
4246a0b6 | 400 | static void uuid_endio(struct bio *bio) |
cafe5635 KO |
401 | { |
402 | struct closure *cl = bio->bi_private; | |
cb7a583e | 403 | struct cache_set *c = container_of(cl, struct cache_set, uuid_write); |
cafe5635 | 404 | |
4e4cbee9 | 405 | cache_set_err_on(bio->bi_status, c, "accessing uuids"); |
cafe5635 KO |
406 | bch_bbio_free(bio, c); |
407 | closure_put(cl); | |
408 | } | |
409 | ||
cb7a583e KO |
410 | static void uuid_io_unlock(struct closure *cl) |
411 | { | |
412 | struct cache_set *c = container_of(cl, struct cache_set, uuid_write); | |
413 | ||
414 | up(&c->uuid_write_mutex); | |
415 | } | |
416 | ||
ad0d9e76 | 417 | static void uuid_io(struct cache_set *c, int op, unsigned long op_flags, |
cafe5635 KO |
418 | struct bkey *k, struct closure *parent) |
419 | { | |
cb7a583e | 420 | struct closure *cl = &c->uuid_write; |
cafe5635 | 421 | struct uuid_entry *u; |
6f10f7d1 | 422 | unsigned int i; |
85b1492e | 423 | char buf[80]; |
cafe5635 KO |
424 | |
425 | BUG_ON(!parent); | |
cb7a583e KO |
426 | down(&c->uuid_write_mutex); |
427 | closure_init(cl, parent); | |
cafe5635 KO |
428 | |
429 | for (i = 0; i < KEY_PTRS(k); i++) { | |
430 | struct bio *bio = bch_bbio_alloc(c); | |
431 | ||
1eff9d32 | 432 | bio->bi_opf = REQ_SYNC | REQ_META | op_flags; |
4f024f37 | 433 | bio->bi_iter.bi_size = KEY_SIZE(k) << 9; |
cafe5635 KO |
434 | |
435 | bio->bi_end_io = uuid_endio; | |
436 | bio->bi_private = cl; | |
ad0d9e76 | 437 | bio_set_op_attrs(bio, op, REQ_SYNC|REQ_META|op_flags); |
169ef1cf | 438 | bch_bio_map(bio, c->uuids); |
cafe5635 KO |
439 | |
440 | bch_submit_bbio(bio, c, k, i); | |
441 | ||
ad0d9e76 | 442 | if (op != REQ_OP_WRITE) |
cafe5635 KO |
443 | break; |
444 | } | |
445 | ||
dc9d98d6 | 446 | bch_extent_to_text(buf, sizeof(buf), k); |
46f5aa88 | 447 | pr_debug("%s UUIDs at %s\n", op == REQ_OP_WRITE ? "wrote" : "read", buf); |
cafe5635 KO |
448 | |
449 | for (u = c->uuids; u < c->uuids + c->nr_uuids; u++) | |
169ef1cf | 450 | if (!bch_is_zero(u->uuid, 16)) |
46f5aa88 | 451 | pr_debug("Slot %zi: %pU: %s: 1st: %u last: %u inv: %u\n", |
cafe5635 KO |
452 | u - c->uuids, u->uuid, u->label, |
453 | u->first_reg, u->last_reg, u->invalidated); | |
454 | ||
cb7a583e | 455 | closure_return_with_destructor(cl, uuid_io_unlock); |
cafe5635 KO |
456 | } |
457 | ||
458 | static char *uuid_read(struct cache_set *c, struct jset *j, struct closure *cl) | |
459 | { | |
460 | struct bkey *k = &j->uuid_bucket; | |
461 | ||
65d45231 | 462 | if (__bch_btree_ptr_invalid(c, k)) |
cafe5635 KO |
463 | return "bad uuid pointer"; |
464 | ||
465 | bkey_copy(&c->uuid_bucket, k); | |
70fd7614 | 466 | uuid_io(c, REQ_OP_READ, 0, k, cl); |
cafe5635 KO |
467 | |
468 | if (j->version < BCACHE_JSET_VERSION_UUIDv1) { | |
469 | struct uuid_entry_v0 *u0 = (void *) c->uuids; | |
470 | struct uuid_entry *u1 = (void *) c->uuids; | |
471 | int i; | |
472 | ||
473 | closure_sync(cl); | |
474 | ||
475 | /* | |
476 | * Since the new uuid entry is bigger than the old, we have to | |
477 | * convert starting at the highest memory address and work down | |
478 | * in order to do it in place | |
479 | */ | |
480 | ||
481 | for (i = c->nr_uuids - 1; | |
482 | i >= 0; | |
483 | --i) { | |
484 | memcpy(u1[i].uuid, u0[i].uuid, 16); | |
485 | memcpy(u1[i].label, u0[i].label, 32); | |
486 | ||
487 | u1[i].first_reg = u0[i].first_reg; | |
488 | u1[i].last_reg = u0[i].last_reg; | |
489 | u1[i].invalidated = u0[i].invalidated; | |
490 | ||
491 | u1[i].flags = 0; | |
492 | u1[i].sectors = 0; | |
493 | } | |
494 | } | |
495 | ||
496 | return NULL; | |
497 | } | |
498 | ||
499 | static int __uuid_write(struct cache_set *c) | |
500 | { | |
501 | BKEY_PADDED(key) k; | |
502 | struct closure cl; | |
4a784266 | 503 | struct cache *ca = c->cache; |
21e478dd | 504 | unsigned int size; |
cafe5635 | 505 | |
1fae7cf0 | 506 | closure_init_stack(&cl); |
cafe5635 KO |
507 | lockdep_assert_held(&bch_register_lock); |
508 | ||
17e4aed8 | 509 | if (bch_bucket_alloc_set(c, RESERVE_BTREE, &k.key, true)) |
cafe5635 KO |
510 | return 1; |
511 | ||
4a784266 | 512 | size = meta_bucket_pages(&ca->sb) * PAGE_SECTORS; |
21e478dd | 513 | SET_KEY_SIZE(&k.key, size); |
ad0d9e76 | 514 | uuid_io(c, REQ_OP_WRITE, 0, &k.key, &cl); |
cafe5635 KO |
515 | closure_sync(&cl); |
516 | ||
7a55948d | 517 | /* Only one bucket used for uuid write */ |
7a55948d SW |
518 | atomic_long_add(ca->sb.bucket_size, &ca->meta_sectors_written); |
519 | ||
cafe5635 | 520 | bkey_copy(&c->uuid_bucket, &k.key); |
3a3b6a4e | 521 | bkey_put(c, &k.key); |
cafe5635 KO |
522 | return 0; |
523 | } | |
524 | ||
525 | int bch_uuid_write(struct cache_set *c) | |
526 | { | |
527 | int ret = __uuid_write(c); | |
528 | ||
529 | if (!ret) | |
530 | bch_journal_meta(c, NULL); | |
531 | ||
532 | return ret; | |
533 | } | |
534 | ||
535 | static struct uuid_entry *uuid_find(struct cache_set *c, const char *uuid) | |
536 | { | |
537 | struct uuid_entry *u; | |
538 | ||
539 | for (u = c->uuids; | |
540 | u < c->uuids + c->nr_uuids; u++) | |
541 | if (!memcmp(u->uuid, uuid, 16)) | |
542 | return u; | |
543 | ||
544 | return NULL; | |
545 | } | |
546 | ||
547 | static struct uuid_entry *uuid_find_empty(struct cache_set *c) | |
548 | { | |
549 | static const char zero_uuid[16] = "\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0"; | |
1fae7cf0 | 550 | |
cafe5635 KO |
551 | return uuid_find(c, zero_uuid); |
552 | } | |
553 | ||
554 | /* | |
555 | * Bucket priorities/gens: | |
556 | * | |
557 | * For each bucket, we store on disk its | |
3be11dba CL | 558 | * 8 bit gen |
559 | * 16 bit priority | |
cafe5635 KO | 560 | * |
561 | * See alloc.c for an explanation of the gen. The priority is used to implement | |
562 | * lru (and in the future other) cache replacement policies; for most purposes | |
563 | * it's just an opaque integer. | |
564 | * | |
565 | * The gens and the priorities don't have a whole lot to do with each other, and | |
566 | * it's actually the gens that must be written out at specific times - it's no | |
567 | * big deal if the priorities don't get written, if we lose them we just reuse | |
568 | * buckets in suboptimal order. | |
569 | * | |
570 | * On disk they're stored in a packed array, in as many buckets as are required |
571 | * to fit them all. The buckets we use to store them form a list; the journal | |
572 | * header points to the first bucket, the first bucket points to the second | |
573 | * bucket, et cetera. | |
574 | * | |
575 | * This code is used by the allocation code; periodically (whenever it runs out | |
576 | * of buckets to allocate from) the allocation code will invalidate some | |
577 | * buckets, but it can't use those buckets until their new gens are safely on | |
578 | * disk. | |
579 | */ | |
580 | ||
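
The comment block above describes how each bucket's 8-bit gen and 16-bit priority are packed into metadata buckets that chain together on disk. A minimal, self-contained sketch of that layout (the `_example` types are illustrative stand-ins for the driver's `struct bucket_disk` / `struct prio_set`, whose authoritative definitions live in the bcache headers; bch_prio_write() and prio_read() below walk exactly this kind of chain):

```c
#include <stddef.h>
#include <stdint.h>

/* One on-disk entry per bucket: 16-bit priority + 8-bit generation. */
struct bucket_disk_example {
	uint16_t prio;
	uint8_t  gen;
} __attribute__((packed));

/* One metadata bucket full of packed entries; such buckets form a singly
 * linked list, each pointing at the next via next_bucket (the journal
 * header points at the first one). */
struct prio_set_example {
	uint64_t csum;			/* checksum over everything after it */
	uint64_t magic;			/* pset_magic(&ca->sb) in the driver */
	uint64_t seq;
	uint64_t next_bucket;		/* bucket number of the next chunk   */
	struct bucket_disk_example data[];
};

/* How many per-bucket entries fit into one metadata bucket. */
static inline size_t prios_per_bucket_example(size_t bucket_bytes)
{
	return (bucket_bytes - sizeof(struct prio_set_example)) /
	       sizeof(struct bucket_disk_example);
}
```
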
4246a0b6 | 581 | static void prio_endio(struct bio *bio) |
cafe5635 KO |
582 | { |
583 | struct cache *ca = bio->bi_private; | |
584 | ||
4e4cbee9 | 585 | cache_set_err_on(bio->bi_status, ca->set, "accessing priorities"); |
cafe5635 KO |
586 | bch_bbio_free(bio, ca->set); |
587 | closure_put(&ca->prio); | |
588 | } | |
589 | ||
ad0d9e76 MC |
590 | static void prio_io(struct cache *ca, uint64_t bucket, int op, |
591 | unsigned long op_flags) | |
cafe5635 KO |
592 | { |
593 | struct closure *cl = &ca->prio; | |
594 | struct bio *bio = bch_bbio_alloc(ca->set); | |
595 | ||
596 | closure_init_stack(cl); | |
597 | ||
4f024f37 | 598 | bio->bi_iter.bi_sector = bucket * ca->sb.bucket_size; |
74d46992 | 599 | bio_set_dev(bio, ca->bdev); |
c954ac8d | 600 | bio->bi_iter.bi_size = meta_bucket_bytes(&ca->sb); |
cafe5635 KO |
601 | |
602 | bio->bi_end_io = prio_endio; | |
603 | bio->bi_private = ca; | |
ad0d9e76 | 604 | bio_set_op_attrs(bio, op, REQ_SYNC|REQ_META|op_flags); |
169ef1cf | 605 | bch_bio_map(bio, ca->disk_buckets); |
cafe5635 | 606 | |
771f393e | 607 | closure_bio_submit(ca->set, bio, &ca->prio); |
cafe5635 KO |
608 | closure_sync(cl); |
609 | } | |
610 | ||
84c529ae | 611 | int bch_prio_write(struct cache *ca, bool wait) |
cafe5635 KO |
612 | { |
613 | int i; | |
614 | struct bucket *b; | |
615 | struct closure cl; | |
616 | ||
46f5aa88 | 617 | pr_debug("free_prio=%zu, free_none=%zu, free_inc=%zu\n", |
84c529ae AR |
618 | fifo_used(&ca->free[RESERVE_PRIO]), |
619 | fifo_used(&ca->free[RESERVE_NONE]), | |
620 | fifo_used(&ca->free_inc)); | |
621 | ||
622 | /* | |
623 | * Pre-check if there are enough free buckets. In the non-blocking | |
624 | * scenario it's better to fail early rather than starting to allocate | |
625 | * buckets and do a cleanup later in case of failure. | |
626 | */ | |
627 | if (!wait) { | |
628 | size_t avail = fifo_used(&ca->free[RESERVE_PRIO]) + | |
629 | fifo_used(&ca->free[RESERVE_NONE]); | |
630 | if (prio_buckets(ca) > avail) | |
631 | return -ENOMEM; | |
632 | } | |
633 | ||
cafe5635 KO |
634 | closure_init_stack(&cl); |
635 | ||
636 | lockdep_assert_held(&ca->set->bucket_lock); | |
637 | ||
cafe5635 KO |
638 | ca->disk_buckets->seq++; |
639 | ||
640 | atomic_long_add(ca->sb.bucket_size * prio_buckets(ca), | |
641 | &ca->meta_sectors_written); | |
642 | ||
cafe5635 KO |
643 | for (i = prio_buckets(ca) - 1; i >= 0; --i) { |
644 | long bucket; | |
645 | struct prio_set *p = ca->disk_buckets; | |
b1a67b0f KO |
646 | struct bucket_disk *d = p->data; |
647 | struct bucket_disk *end = d + prios_per_bucket(ca); | |
cafe5635 KO |
648 | |
649 | for (b = ca->buckets + i * prios_per_bucket(ca); | |
650 | b < ca->buckets + ca->sb.nbuckets && d < end; | |
651 | b++, d++) { | |
652 | d->prio = cpu_to_le16(b->prio); | |
653 | d->gen = b->gen; | |
654 | } | |
655 | ||
656 | p->next_bucket = ca->prio_buckets[i + 1]; | |
81ab4190 | 657 | p->magic = pset_magic(&ca->sb); |
c954ac8d | 658 | p->csum = bch_crc64(&p->magic, meta_bucket_bytes(&ca->sb) - 8); |
cafe5635 | 659 | |
84c529ae | 660 | bucket = bch_bucket_alloc(ca, RESERVE_PRIO, wait); |
cafe5635 KO |
661 | BUG_ON(bucket == -1); |
662 | ||
663 | mutex_unlock(&ca->set->bucket_lock); | |
ad0d9e76 | 664 | prio_io(ca, bucket, REQ_OP_WRITE, 0); |
cafe5635 KO |
665 | mutex_lock(&ca->set->bucket_lock); |
666 | ||
667 | ca->prio_buckets[i] = bucket; | |
668 | atomic_dec_bug(&ca->buckets[bucket].pin); | |
669 | } | |
670 | ||
671 | mutex_unlock(&ca->set->bucket_lock); | |
672 | ||
673 | bch_journal_meta(ca->set, &cl); | |
674 | closure_sync(&cl); | |
675 | ||
676 | mutex_lock(&ca->set->bucket_lock); | |
677 | ||
cafe5635 KO |
678 | /* |
679 | * Don't want the old priorities to get garbage collected until after we | |
680 | * finish writing the new ones, and they're journalled | |
681 | */ | |
2531d9ee KO |
682 | for (i = 0; i < prio_buckets(ca); i++) { |
683 | if (ca->prio_last_buckets[i]) | |
684 | __bch_bucket_free(ca, | |
685 | &ca->buckets[ca->prio_last_buckets[i]]); | |
686 | ||
cafe5635 | 687 | ca->prio_last_buckets[i] = ca->prio_buckets[i]; |
2531d9ee | 688 | } |
84c529ae | 689 | return 0; |
cafe5635 KO |
690 | } |
691 | ||
49d08d59 | 692 | static int prio_read(struct cache *ca, uint64_t bucket) |
cafe5635 KO |
693 | { |
694 | struct prio_set *p = ca->disk_buckets; | |
695 | struct bucket_disk *d = p->data + prios_per_bucket(ca), *end = d; | |
696 | struct bucket *b; | |
6f10f7d1 | 697 | unsigned int bucket_nr = 0; |
49d08d59 | 698 | int ret = -EIO; |
cafe5635 KO |
699 | |
700 | for (b = ca->buckets; | |
701 | b < ca->buckets + ca->sb.nbuckets; | |
702 | b++, d++) { | |
703 | if (d == end) { | |
704 | ca->prio_buckets[bucket_nr] = bucket; | |
705 | ca->prio_last_buckets[bucket_nr] = bucket; | |
706 | bucket_nr++; | |
707 | ||
70fd7614 | 708 | prio_io(ca, bucket, REQ_OP_READ, 0); |
cafe5635 | 709 | |
b0d30981 | 710 | if (p->csum != |
c954ac8d | 711 | bch_crc64(&p->magic, meta_bucket_bytes(&ca->sb) - 8)) { |
46f5aa88 | 712 | pr_warn("bad csum reading priorities\n"); |
49d08d59 CL |
713 | goto out; |
714 | } | |
cafe5635 | 715 | |
49d08d59 | 716 | if (p->magic != pset_magic(&ca->sb)) { |
46f5aa88 | 717 | pr_warn("bad magic reading priorities\n"); |
49d08d59 CL |
718 | goto out; |
719 | } | |
cafe5635 KO |
720 | |
721 | bucket = p->next_bucket; | |
722 | d = p->data; | |
723 | } | |
724 | ||
725 | b->prio = le16_to_cpu(d->prio); | |
3a2fd9d5 | 726 | b->gen = b->last_gc = d->gen; |
cafe5635 | 727 | } |
49d08d59 CL |
728 | |
729 | ret = 0; | |
730 | out: | |
731 | return ret; | |
cafe5635 KO |
732 | } |
733 | ||
734 | /* Bcache device */ | |
735 | ||
736 | static int open_dev(struct block_device *b, fmode_t mode) | |
737 | { | |
738 | struct bcache_device *d = b->bd_disk->private_data; | |
1fae7cf0 | 739 | |
c4d951dd | 740 | if (test_bit(BCACHE_DEV_CLOSING, &d->flags)) |
cafe5635 KO |
741 | return -ENXIO; |
742 | ||
743 | closure_get(&d->cl); | |
744 | return 0; | |
745 | } | |
746 | ||
867e1162 | 747 | static void release_dev(struct gendisk *b, fmode_t mode) |
cafe5635 KO |
748 | { |
749 | struct bcache_device *d = b->private_data; | |
1fae7cf0 | 750 | |
cafe5635 | 751 | closure_put(&d->cl); |
cafe5635 KO |
752 | } |
753 | ||
754 | static int ioctl_dev(struct block_device *b, fmode_t mode, | |
755 | unsigned int cmd, unsigned long arg) | |
756 | { | |
757 | struct bcache_device *d = b->bd_disk->private_data; | |
0f0709e6 | 758 | |
cafe5635 KO |
759 | return d->ioctl(d, mode, cmd, arg); |
760 | } | |
761 | ||
c62b37d9 CH |
762 | static const struct block_device_operations bcache_cached_ops = { |
763 | .submit_bio = cached_dev_submit_bio, | |
764 | .open = open_dev, | |
765 | .release = release_dev, | |
766 | .ioctl = ioctl_dev, | |
767 | .owner = THIS_MODULE, | |
768 | }; | |
769 | ||
770 | static const struct block_device_operations bcache_flash_ops = { | |
771 | .submit_bio = flash_dev_submit_bio, | |
cafe5635 KO |
772 | .open = open_dev, |
773 | .release = release_dev, | |
774 | .ioctl = ioctl_dev, | |
775 | .owner = THIS_MODULE, | |
776 | }; | |
777 | ||
778 | void bcache_device_stop(struct bcache_device *d) | |
779 | { | |
c4d951dd | 780 | if (!test_and_set_bit(BCACHE_DEV_CLOSING, &d->flags)) |
63d63b51 CL |
781 | /* |
782 | * closure_fn set to | |
783 | * - cached device: cached_dev_flush() | |
784 | * - flash dev: flash_dev_flush() | |
785 | */ | |
cafe5635 KO |
786 | closure_queue(&d->cl); |
787 | } | |
788 | ||
ee668506 KO |
789 | static void bcache_device_unlink(struct bcache_device *d) |
790 | { | |
c4d951dd | 791 | lockdep_assert_held(&bch_register_lock); |
ee668506 | 792 | |
c4d951dd | 793 | if (d->c && !test_and_set_bit(BCACHE_DEV_UNLINK_DONE, &d->flags)) { |
08fdb2cd | 794 | struct cache *ca = d->c->cache; |
ee668506 | 795 | |
c4d951dd KO |
796 | sysfs_remove_link(&d->c->kobj, d->name); |
797 | sysfs_remove_link(&d->kobj, "cache"); | |
798 | ||
08fdb2cd | 799 | bd_unlink_disk_holder(ca->bdev, d->disk); |
c4d951dd | 800 | } |
ee668506 KO |
801 | } |
802 | ||
803 | static void bcache_device_link(struct bcache_device *d, struct cache_set *c, | |
804 | const char *name) | |
805 | { | |
08fdb2cd | 806 | struct cache *ca = c->cache; |
4b6efb4b | 807 | int ret; |
ee668506 | 808 | |
08fdb2cd | 809 | bd_link_disk_holder(ca->bdev, d->disk); |
ee668506 KO |
810 | |
811 | snprintf(d->name, BCACHEDEVNAME_SIZE, | |
812 | "%s%u", name, d->id); | |
813 | ||
4b6efb4b CL |
814 | ret = sysfs_create_link(&d->kobj, &c->kobj, "cache"); |
815 | if (ret < 0) | |
46f5aa88 | 816 | pr_err("Couldn't create device -> cache set symlink\n"); |
4b6efb4b CL |
817 | |
818 | ret = sysfs_create_link(&c->kobj, &d->kobj, d->name); | |
819 | if (ret < 0) | |
46f5aa88 | 820 | pr_err("Couldn't create cache set -> device symlink\n"); |
fecaee6f ZL |
821 | |
822 | clear_bit(BCACHE_DEV_UNLINK_DONE, &d->flags); | |
ee668506 KO |
823 | } |
824 | ||
cafe5635 KO |
825 | static void bcache_device_detach(struct bcache_device *d) |
826 | { | |
827 | lockdep_assert_held(&bch_register_lock); | |
828 | ||
ea8c5356 CL |
829 | atomic_dec(&d->c->attached_dev_nr); |
830 | ||
c4d951dd | 831 | if (test_bit(BCACHE_DEV_DETACHING, &d->flags)) { |
cafe5635 KO |
832 | struct uuid_entry *u = d->c->uuids + d->id; |
833 | ||
834 | SET_UUID_FLASH_ONLY(u, 0); | |
835 | memcpy(u->uuid, invalid_uuid, 16); | |
75cbb3f1 | 836 | u->invalidated = cpu_to_le32((u32)ktime_get_real_seconds()); |
cafe5635 | 837 | bch_uuid_write(d->c); |
cafe5635 KO |
838 | } |
839 | ||
c4d951dd | 840 | bcache_device_unlink(d); |
ee668506 | 841 | |
cafe5635 KO |
842 | d->c->devices[d->id] = NULL; |
843 | closure_put(&d->c->caching); | |
844 | d->c = NULL; | |
845 | } | |
846 | ||
847 | static void bcache_device_attach(struct bcache_device *d, struct cache_set *c, | |
6f10f7d1 | 848 | unsigned int id) |
cafe5635 | 849 | { |
cafe5635 KO |
850 | d->id = id; |
851 | d->c = c; | |
852 | c->devices[id] = d; | |
853 | ||
2831231d CL |
854 | if (id >= c->devices_max_used) |
855 | c->devices_max_used = id + 1; | |
856 | ||
cafe5635 KO |
857 | closure_get(&c->caching); |
858 | } | |
859 | ||
1dbe32ad CL | 860 | static inline int first_minor_to_idx(int first_minor) |
861 | { | |
862 | return (first_minor/BCACHE_MINORS); | |
863 | } | |
864 | ||
865 | static inline int idx_to_first_minor(int idx) | |
866 | { | |
867 | return (idx * BCACHE_MINORS); | |
868 | } | |
869 | ||
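
first_minor_to_idx()/idx_to_first_minor() above carve the minor-number space into fixed chunks of BCACHE_MINORS (128, defined near the top of this file), so each bcache device index owns a contiguous block of minors for itself and its partitions. A small worked illustration (the `_example` helpers are hypothetical copies of the two functions):

```c
#include <assert.h>

#define BCACHE_MINORS_EXAMPLE 128	/* mirrors BCACHE_MINORS above */

static int idx_to_first_minor_example(int idx)
{
	return idx * BCACHE_MINORS_EXAMPLE;
}

static int first_minor_to_idx_example(int first_minor)
{
	return first_minor / BCACHE_MINORS_EXAMPLE;
}

int main(void)
{
	/* Device index 3 (bcache3) owns minors 384..511 for itself and
	 * its partitions; the mapping round-trips. */
	assert(idx_to_first_minor_example(3) == 384);
	assert(first_minor_to_idx_example(384) == 3);
	return 0;
}
```
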
cafe5635 KO |
870 | static void bcache_device_free(struct bcache_device *d) |
871 | { | |
2d886951 CL |
872 | struct gendisk *disk = d->disk; |
873 | ||
cafe5635 KO |
874 | lockdep_assert_held(&bch_register_lock); |
875 | ||
2d886951 | 876 | if (disk) |
46f5aa88 | 877 | pr_info("%s stopped\n", disk->disk_name); |
2d886951 | 878 | else |
46f5aa88 | 879 | pr_err("bcache device (NULL gendisk) stopped\n"); |
cafe5635 KO |
880 | |
881 | if (d->c) | |
882 | bcache_device_detach(d); | |
2d886951 CL |
883 | |
884 | if (disk) { | |
1dbe32ad | 885 | ida_simple_remove(&bcache_device_idx, |
2d886951 | 886 | first_minor_to_idx(disk->first_minor)); |
8468f450 | 887 | blk_cleanup_disk(disk); |
28935ab5 | 888 | } |
cafe5635 | 889 | |
d19936a2 | 890 | bioset_exit(&d->bio_split); |
958b4338 PE |
891 | kvfree(d->full_dirty_stripes); |
892 | kvfree(d->stripe_sectors_dirty); | |
cafe5635 KO |
893 | |
894 | closure_debug_destroy(&d->cl); | |
895 | } | |
896 | ||
6f10f7d1 | 897 | static int bcache_device_init(struct bcache_device *d, unsigned int block_size, |
c62b37d9 CH |
898 | sector_t sectors, struct block_device *cached_bdev, |
899 | const struct block_device_operations *ops) | |
cafe5635 KO |
900 | { |
901 | struct request_queue *q; | |
5f2b18ec BVA |
902 | const size_t max_stripes = min_t(size_t, INT_MAX, |
903 | SIZE_MAX / sizeof(atomic_t)); | |
65f0f017 | 904 | uint64_t n; |
1dbe32ad | 905 | int idx; |
279afbad | 906 | |
2d679fc7 KO |
907 | if (!d->stripe_size) |
908 | d->stripe_size = 1 << 31; | |
279afbad | 909 | |
65f0f017 CL |
910 | n = DIV_ROUND_UP_ULL(sectors, d->stripe_size); |
911 | if (!n || n > max_stripes) { | |
912 | pr_err("nr_stripes too large or invalid: %llu (start sector beyond end of disk?)\n", | |
913 | n); | |
279afbad | 914 | return -ENOMEM; |
48a915a8 | 915 | } |
65f0f017 | 916 | d->nr_stripes = n; |
279afbad KO |
917 | |
918 | n = d->nr_stripes * sizeof(atomic_t); | |
bc4e54f6 | 919 | d->stripe_sectors_dirty = kvzalloc(n, GFP_KERNEL); |
279afbad KO |
920 | if (!d->stripe_sectors_dirty) |
921 | return -ENOMEM; | |
cafe5635 | 922 | |
48a915a8 | 923 | n = BITS_TO_LONGS(d->nr_stripes) * sizeof(unsigned long); |
bc4e54f6 | 924 | d->full_dirty_stripes = kvzalloc(n, GFP_KERNEL); |
48a915a8 | 925 | if (!d->full_dirty_stripes) |
224b0683 | 926 | goto out_free_stripe_sectors_dirty; |
48a915a8 | 927 | |
1dbe32ad CL |
928 | idx = ida_simple_get(&bcache_device_idx, 0, |
929 | BCACHE_DEVICE_IDX_MAX, GFP_KERNEL); | |
930 | if (idx < 0) | |
224b0683 | 931 | goto out_free_full_dirty_stripes; |
b8c0d911 | 932 | |
d19936a2 | 933 | if (bioset_init(&d->bio_split, 4, offsetof(struct bbio, bio), |
9b4e9f5a | 934 | BIOSET_NEED_BVECS|BIOSET_NEED_RESCUER)) |
224b0683 | 935 | goto out_ida_remove; |
9b4e9f5a | 936 | |
bc70852f | 937 | d->disk = blk_alloc_disk(NUMA_NO_NODE); |
9b4e9f5a | 938 | if (!d->disk) |
224b0683 | 939 | goto out_bioset_exit; |
cafe5635 | 940 | |
279afbad | 941 | set_capacity(d->disk, sectors); |
1dbe32ad | 942 | snprintf(d->disk->disk_name, DISK_NAME_LEN, "bcache%i", idx); |
cafe5635 KO |
943 | |
944 | d->disk->major = bcache_major; | |
1dbe32ad | 945 | d->disk->first_minor = idx_to_first_minor(idx); |
bc70852f | 946 | d->disk->minors = BCACHE_MINORS; |
c62b37d9 | 947 | d->disk->fops = ops; |
cafe5635 KO |
948 | d->disk->private_data = d; |
949 | ||
bc70852f | 950 | q = d->disk->queue; |
cafe5635 KO |
951 | q->limits.max_hw_sectors = UINT_MAX; |
952 | q->limits.max_sectors = UINT_MAX; | |
953 | q->limits.max_segment_size = UINT_MAX; | |
a8affc03 | 954 | q->limits.max_segments = BIO_MAX_VECS; |
2bb4cd5c | 955 | blk_queue_max_discard_sectors(q, UINT_MAX); |
90db6919 | 956 | q->limits.discard_granularity = 512; |
cafe5635 KO |
957 | q->limits.io_min = block_size; |
958 | q->limits.logical_block_size = block_size; | |
959 | q->limits.physical_block_size = block_size; | |
dcacbc12 MFO |
960 | |
961 | if (q->limits.logical_block_size > PAGE_SIZE && cached_bdev) { | |
962 | /* | |
963 | * This should only happen with BCACHE_SB_VERSION_BDEV. | |
964 | * Block/page size is checked for BCACHE_SB_VERSION_CDEV. | |
965 | */ | |
4b25bbf5 | 966 | pr_info("%s: sb/logical block size (%u) greater than page size (%lu) falling back to device logical block size (%u)\n", |
dcacbc12 MFO |
967 | d->disk->disk_name, q->limits.logical_block_size, |
968 | PAGE_SIZE, bdev_logical_block_size(cached_bdev)); | |
969 | ||
970 | /* This also adjusts physical block size/min io size if needed */ | |
971 | blk_queue_logical_block_size(q, bdev_logical_block_size(cached_bdev)); | |
972 | } | |
973 | ||
44e1ebe2 BVA |
974 | blk_queue_flag_set(QUEUE_FLAG_NONROT, d->disk->queue); |
975 | blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, d->disk->queue); | |
cafe5635 | 976 | |
84b4ff9e | 977 | blk_queue_write_cache(q, true, true); |
54d12f2b | 978 | |
cafe5635 | 979 | return 0; |
9b4e9f5a | 980 | |
224b0683 CH |
981 | out_bioset_exit: |
982 | bioset_exit(&d->bio_split); | |
983 | out_ida_remove: | |
9b4e9f5a | 984 | ida_simple_remove(&bcache_device_idx, idx); |
224b0683 CH |
985 | out_free_full_dirty_stripes: |
986 | kvfree(d->full_dirty_stripes); | |
987 | out_free_stripe_sectors_dirty: | |
988 | kvfree(d->stripe_sectors_dirty); | |
9b4e9f5a FS |
989 | return -ENOMEM; |
990 | ||
cafe5635 KO |
991 | } |
992 | ||
993 | /* Cached device */ | |
994 | ||
995 | static void calc_cached_dev_sectors(struct cache_set *c) | |
996 | { | |
997 | uint64_t sectors = 0; | |
998 | struct cached_dev *dc; | |
999 | ||
1000 | list_for_each_entry(dc, &c->cached_devs, list) | |
cda25b82 | 1001 | sectors += bdev_nr_sectors(dc->bdev); |
cafe5635 KO |
1002 | |
1003 | c->cached_dev_sectors = sectors; | |
1004 | } | |
1005 | ||
0f0709e6 CL |
1006 | #define BACKING_DEV_OFFLINE_TIMEOUT 5 |
1007 | static int cached_dev_status_update(void *arg) | |
1008 | { | |
1009 | struct cached_dev *dc = arg; | |
1010 | struct request_queue *q; | |
1011 | ||
1012 | /* | |
1013 | * If this delayed worker is stopping outside, directly quit here. | |
1014 | * dc->io_disable might be set via sysfs interface, so check it | |
1015 | * here too. | |
1016 | */ | |
1017 | while (!kthread_should_stop() && !dc->io_disable) { | |
1018 | q = bdev_get_queue(dc->bdev); | |
1019 | if (blk_queue_dying(q)) | |
1020 | dc->offline_seconds++; | |
1021 | else | |
1022 | dc->offline_seconds = 0; | |
1023 | ||
1024 | if (dc->offline_seconds >= BACKING_DEV_OFFLINE_TIMEOUT) { | |
0f5cd781 CH |
1025 | pr_err("%pg: device offline for %d seconds\n", |
1026 | dc->bdev, | |
0f0709e6 | 1027 | BACKING_DEV_OFFLINE_TIMEOUT); |
46f5aa88 JP |
1028 | pr_err("%s: disable I/O request due to backing device offline\n", |
1029 | dc->disk.name); | |
0f0709e6 CL |
1030 | dc->io_disable = true; |
1031 | /* let others know earlier that io_disable is true */ | |
1032 | smp_mb(); | |
1033 | bcache_device_stop(&dc->disk); | |
1034 | break; | |
1035 | } | |
1036 | schedule_timeout_interruptible(HZ); | |
1037 | } | |
1038 | ||
1039 | wait_for_kthread_stop(); | |
1040 | return 0; | |
1041 | } | |
1042 | ||
1043 | ||
0b13efec | 1044 | int bch_cached_dev_run(struct cached_dev *dc) |
cafe5635 | 1045 | { |
13e1db65 | 1046 | int ret = 0; |
cafe5635 | 1047 | struct bcache_device *d = &dc->disk; |
792732d9 | 1048 | char *buf = kmemdup_nul(dc->sb.label, SB_LABEL_SIZE, GFP_KERNEL); |
a25c32be GP |
1049 | char *env[] = { |
1050 | "DRIVER=bcache", | |
1051 | kasprintf(GFP_KERNEL, "CACHED_UUID=%pU", dc->sb.uuid), | |
792732d9 | 1052 | kasprintf(GFP_KERNEL, "CACHED_LABEL=%s", buf ? : ""), |
ab9e1400 | 1053 | NULL, |
a25c32be | 1054 | }; |
cafe5635 | 1055 | |
e0faa3d7 | 1056 | if (dc->io_disable) { |
0f5cd781 | 1057 | pr_err("I/O disabled on cached dev %pg\n", dc->bdev); |
13e1db65 ZL |
1058 | ret = -EIO; |
1059 | goto out; | |
e0faa3d7 | 1060 | } |
0b13efec | 1061 | |
4d4d8573 | 1062 | if (atomic_xchg(&dc->running, 1)) { |
0f5cd781 | 1063 | pr_info("cached dev %pg is running already\n", dc->bdev); |
13e1db65 ZL |
1064 | ret = -EBUSY; |
1065 | goto out; | |
4d4d8573 | 1066 | } |
cafe5635 KO |
1067 | |
1068 | if (!d->c && | |
1069 | BDEV_STATE(&dc->sb) != BDEV_STATE_NONE) { | |
1070 | struct closure cl; | |
1fae7cf0 | 1071 | |
cafe5635 KO |
1072 | closure_init_stack(&cl); |
1073 | ||
1074 | SET_BDEV_STATE(&dc->sb, BDEV_STATE_STALE); | |
1075 | bch_write_bdev_super(dc, &cl); | |
1076 | closure_sync(&cl); | |
1077 | } | |
1078 | ||
2961c3bb LC |
1079 | ret = add_disk(d->disk); |
1080 | if (ret) | |
1081 | goto out; | |
ee668506 | 1082 | bd_link_disk_holder(dc->bdev, dc->disk.disk); |
3be11dba CL | 1083 | /* |
1084 | * This won't show up in the uevent file; use udevadm monitor -e instead. |
1085 | * Only class / kset properties are persistent. |
1086 | */ | |
cafe5635 | 1087 | kobject_uevent_env(&disk_to_dev(d->disk)->kobj, KOBJ_CHANGE, env); |
a25c32be | 1088 | |
cafe5635 | 1089 | if (sysfs_create_link(&d->kobj, &disk_to_dev(d->disk)->kobj, "dev") || |
0b13efec CL |
1090 | sysfs_create_link(&disk_to_dev(d->disk)->kobj, |
1091 | &d->kobj, "bcache")) { | |
46f5aa88 | 1092 | pr_err("Couldn't create bcache dev <-> disk sysfs symlinks\n"); |
13e1db65 ZL |
1093 | ret = -ENOMEM; |
1094 | goto out; | |
0b13efec | 1095 | } |
0f0709e6 CL |
1096 | |
1097 | dc->status_update_thread = kthread_run(cached_dev_status_update, | |
1098 | dc, "bcache_status_update"); | |
1099 | if (IS_ERR(dc->status_update_thread)) { | |
46f5aa88 | 1100 | pr_warn("failed to create bcache_status_update kthread, continue to run without monitoring backing device status\n"); |
0f0709e6 | 1101 | } |
0b13efec | 1102 | |
13e1db65 ZL |
1103 | out: |
1104 | kfree(env[1]); | |
1105 | kfree(env[2]); | |
1106 | kfree(buf); | |
1107 | return ret; | |
cafe5635 KO |
1108 | } |
1109 | ||
3fd47bfe CL | 1110 | /* |
1111 | * If BCACHE_DEV_RATE_DW_RUNNING is set, it means the routine of the delayed |
1112 | * work dc->writeback_rate_update is running. Wait until the routine | |
1113 | * quits (BCACHE_DEV_RATE_DW_RUNNING is clear), then continue to | |
1114 | * cancel it. If BCACHE_DEV_RATE_DW_RUNNING is not clear after time_out | |
1115 | * seconds, give up waiting here and continue to cancel it too. | |
1116 | */ | |
1117 | static void cancel_writeback_rate_update_dwork(struct cached_dev *dc) | |
1118 | { | |
1119 | int time_out = WRITEBACK_RATE_UPDATE_SECS_MAX * HZ; | |
1120 | ||
1121 | do { | |
1122 | if (!test_bit(BCACHE_DEV_RATE_DW_RUNNING, | |
1123 | &dc->disk.flags)) | |
1124 | break; | |
1125 | time_out--; | |
1126 | schedule_timeout_interruptible(1); | |
1127 | } while (time_out > 0); | |
1128 | ||
1129 | if (time_out == 0) | |
46f5aa88 | 1130 | pr_warn("give up waiting for dc->writeback_write_update to quit\n"); |
3fd47bfe CL |
1131 | |
1132 | cancel_delayed_work_sync(&dc->writeback_rate_update); | |
1133 | } | |
1134 | ||
cafe5635 KO |
1135 | static void cached_dev_detach_finish(struct work_struct *w) |
1136 | { | |
1137 | struct cached_dev *dc = container_of(w, struct cached_dev, detach); | |
aa97f6cd | 1138 | struct cache_set *c = dc->disk.c; |
cafe5635 | 1139 | |
c4d951dd | 1140 | BUG_ON(!test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags)); |
3b304d24 | 1141 | BUG_ON(refcount_read(&dc->count)); |
cafe5635 | 1142 | |
cafe5635 | 1143 | |
3fd47bfe CL |
1144 | if (test_and_clear_bit(BCACHE_DEV_WB_RUNNING, &dc->disk.flags)) |
1145 | cancel_writeback_rate_update_dwork(dc); | |
1146 | ||
8d29c442 TJ |
1147 | if (!IS_ERR_OR_NULL(dc->writeback_thread)) { |
1148 | kthread_stop(dc->writeback_thread); | |
1149 | dc->writeback_thread = NULL; | |
1150 | } | |
1151 | ||
97ba3b81 CL |
1152 | mutex_lock(&bch_register_lock); |
1153 | ||
cafe5635 KO |
1154 | bcache_device_detach(&dc->disk); |
1155 | list_move(&dc->list, &uncached_devices); | |
aa97f6cd | 1156 | calc_cached_dev_sectors(c); |
cafe5635 | 1157 | |
c4d951dd | 1158 | clear_bit(BCACHE_DEV_DETACHING, &dc->disk.flags); |
5b1016e6 | 1159 | clear_bit(BCACHE_DEV_UNLINK_DONE, &dc->disk.flags); |
c4d951dd | 1160 | |
cafe5635 KO |
1161 | mutex_unlock(&bch_register_lock); |
1162 | ||
0f5cd781 | 1163 | pr_info("Caching disabled for %pg\n", dc->bdev); |
cafe5635 KO |
1164 | |
1165 | /* Drop ref we took in cached_dev_detach() */ | |
1166 | closure_put(&dc->disk.cl); | |
1167 | } | |
1168 | ||
1169 | void bch_cached_dev_detach(struct cached_dev *dc) | |
1170 | { | |
1171 | lockdep_assert_held(&bch_register_lock); | |
1172 | ||
c4d951dd | 1173 | if (test_bit(BCACHE_DEV_CLOSING, &dc->disk.flags)) |
cafe5635 KO |
1174 | return; |
1175 | ||
c4d951dd | 1176 | if (test_and_set_bit(BCACHE_DEV_DETACHING, &dc->disk.flags)) |
cafe5635 KO |
1177 | return; |
1178 | ||
1179 | /* | |
1180 | * Block the device from being closed and freed until we're finished | |
1181 | * detaching | |
1182 | */ | |
1183 | closure_get(&dc->disk.cl); | |
1184 | ||
1185 | bch_writeback_queue(dc); | |
3fd47bfe | 1186 | |
cafe5635 KO |
1187 | cached_dev_put(dc); |
1188 | } | |
1189 | ||
73ac105b TJ |
1190 | int bch_cached_dev_attach(struct cached_dev *dc, struct cache_set *c, |
1191 | uint8_t *set_uuid) | |
cafe5635 | 1192 | { |
75cbb3f1 | 1193 | uint32_t rtime = cpu_to_le32((u32)ktime_get_real_seconds()); |
cafe5635 | 1194 | struct uuid_entry *u; |
86755b7a | 1195 | struct cached_dev *exist_dc, *t; |
0b13efec | 1196 | int ret = 0; |
cafe5635 | 1197 | |
1132e56e CL |
1198 | if ((set_uuid && memcmp(set_uuid, c->set_uuid, 16)) || |
1199 | (!set_uuid && memcmp(dc->sb.set_uuid, c->set_uuid, 16))) | |
cafe5635 KO |
1200 | return -ENOENT; |
1201 | ||
1202 | if (dc->disk.c) { | |
0f5cd781 | 1203 | pr_err("Can't attach %pg: already attached\n", dc->bdev); |
cafe5635 KO |
1204 | return -EINVAL; |
1205 | } | |
1206 | ||
1207 | if (test_bit(CACHE_SET_STOPPING, &c->flags)) { | |
0f5cd781 | 1208 | pr_err("Can't attach %pg: shutting down\n", dc->bdev); |
cafe5635 KO |
1209 | return -EINVAL; |
1210 | } | |
1211 | ||
4a784266 | 1212 | if (dc->sb.block_size < c->cache->sb.block_size) { |
cafe5635 | 1213 | /* Will die */ |
0f5cd781 CH |
1214 | pr_err("Couldn't attach %pg: block size less than set's block size\n", |
1215 | dc->bdev); | |
cafe5635 KO |
1216 | return -EINVAL; |
1217 | } | |
1218 | ||
86755b7a ML |
1219 | /* Check whether already attached */ |
1220 | list_for_each_entry_safe(exist_dc, t, &c->cached_devs, list) { | |
1221 | if (!memcmp(dc->sb.uuid, exist_dc->sb.uuid, 16)) { | |
0f5cd781 CH |
1222 | pr_err("Tried to attach %pg but duplicate UUID already attached\n", |
1223 | dc->bdev); | |
86755b7a ML |
1224 | |
1225 | return -EINVAL; | |
1226 | } | |
1227 | } | |
1228 | ||
cafe5635 KO |
1229 | u = uuid_find(c, dc->sb.uuid); |
1230 | ||
1231 | if (u && | |
1232 | (BDEV_STATE(&dc->sb) == BDEV_STATE_STALE || | |
1233 | BDEV_STATE(&dc->sb) == BDEV_STATE_NONE)) { | |
1234 | memcpy(u->uuid, invalid_uuid, 16); | |
75cbb3f1 | 1235 | u->invalidated = cpu_to_le32((u32)ktime_get_real_seconds()); |
cafe5635 KO |
1236 | u = NULL; |
1237 | } | |
1238 | ||
1239 | if (!u) { | |
1240 | if (BDEV_STATE(&dc->sb) == BDEV_STATE_DIRTY) { | |
0f5cd781 | 1241 | pr_err("Couldn't find uuid for %pg in set\n", dc->bdev); |
cafe5635 KO |
1242 | return -ENOENT; |
1243 | } | |
1244 | ||
1245 | u = uuid_find_empty(c); | |
1246 | if (!u) { | |
0f5cd781 | 1247 | pr_err("Not caching %pg, no room for UUID\n", dc->bdev); |
cafe5635 KO |
1248 | return -EINVAL; |
1249 | } | |
1250 | } | |
1251 | ||
3be11dba CL | 1252 | /* |
1253 | * Deadlocks since we're called via sysfs... | |
1254 | * sysfs_remove_file(&dc->kobj, &sysfs_attach); | |
cafe5635 KO | 1255 | */ |
1256 | ||
169ef1cf | 1257 | if (bch_is_zero(u->uuid, 16)) { |
cafe5635 | 1258 | struct closure cl; |
1fae7cf0 | 1259 | |
cafe5635 KO |
1260 | closure_init_stack(&cl); |
1261 | ||
1262 | memcpy(u->uuid, dc->sb.uuid, 16); | |
1263 | memcpy(u->label, dc->sb.label, SB_LABEL_SIZE); | |
1264 | u->first_reg = u->last_reg = rtime; | |
1265 | bch_uuid_write(c); | |
1266 | ||
1132e56e | 1267 | memcpy(dc->sb.set_uuid, c->set_uuid, 16); |
cafe5635 KO |
1268 | SET_BDEV_STATE(&dc->sb, BDEV_STATE_CLEAN); |
1269 | ||
1270 | bch_write_bdev_super(dc, &cl); | |
1271 | closure_sync(&cl); | |
1272 | } else { | |
1273 | u->last_reg = rtime; | |
1274 | bch_uuid_write(c); | |
1275 | } | |
1276 | ||
1277 | bcache_device_attach(&dc->disk, c, u - c->uuids); | |
cafe5635 KO |
1278 | list_move(&dc->list, &c->cached_devs); |
1279 | calc_cached_dev_sectors(c); | |
1280 | ||
cafe5635 KO |
1281 | /* |
1282 | * dc->c must be set before dc->count != 0 - paired with the mb in | |
1283 | * cached_dev_get() | |
1284 | */ | |
eb2b3d03 | 1285 | smp_wmb(); |
3b304d24 | 1286 | refcount_set(&dc->count, 1); |
cafe5635 | 1287 | |
07cc6ef8 EW |
1288 | /* Block writeback thread, but spawn it */ |
1289 | down_write(&dc->writeback_lock); | |
1290 | if (bch_cached_dev_writeback_start(dc)) { | |
1291 | up_write(&dc->writeback_lock); | |
46f5aa88 | 1292 | pr_err("Couldn't start writeback facilities for %s\n", |
633bb2ce | 1293 | dc->disk.disk->disk_name); |
9e5c3535 | 1294 | return -ENOMEM; |
07cc6ef8 | 1295 | } |
9e5c3535 | 1296 | |
cafe5635 KO |
1297 | if (BDEV_STATE(&dc->sb) == BDEV_STATE_DIRTY) { |
1298 | atomic_set(&dc->has_dirty, 1); | |
cafe5635 KO |
1299 | bch_writeback_queue(dc); |
1300 | } | |
1301 | ||
2e17a262 TJ |
1302 | bch_sectors_dirty_init(&dc->disk); |
1303 | ||
0b13efec CL |
1304 | ret = bch_cached_dev_run(dc); |
1305 | if (ret && (ret != -EBUSY)) { | |
1306 | up_write(&dc->writeback_lock); | |
5c2a634c CL |
1307 | /* |
1308 | * bch_register_lock is held, bcache_device_stop() is not | |
1309 | * able to be directly called. The kthread and kworker | |
1310 | * created previously in bch_cached_dev_writeback_start() | |
1311 | * have to be stopped manually here. | |
1312 | */ | |
1313 | kthread_stop(dc->writeback_thread); | |
1314 | cancel_writeback_rate_update_dwork(dc); | |
0f5cd781 | 1315 | pr_err("Couldn't run cached device %pg\n", dc->bdev); |
0b13efec CL |
1316 | return ret; |
1317 | } | |
1318 | ||
ee668506 | 1319 | bcache_device_link(&dc->disk, c, "bdev"); |
ea8c5356 | 1320 | atomic_inc(&c->attached_dev_nr); |
cafe5635 | 1321 | |
5342fd42 CL |
1322 | if (bch_has_feature_obso_large_bucket(&(c->cache->sb))) { |
1323 | pr_err("The obsoleted large bucket layout is unsupported, set the bcache device into read-only\n"); | |
1324 | pr_err("Please update to the latest bcache-tools to create the cache device\n"); | |
1325 | set_disk_ro(dc->disk.disk, 1); | |
1326 | } | |
1327 | ||
07cc6ef8 EW |
1328 | /* Allow the writeback thread to proceed */ |
1329 | up_write(&dc->writeback_lock); | |
1330 | ||
0f5cd781 CH |
1331 | pr_info("Caching %pg as %s on set %pU\n", |
1332 | dc->bdev, | |
6e916a7e | 1333 | dc->disk.disk->disk_name, |
1132e56e | 1334 | dc->disk.c->set_uuid); |
cafe5635 KO |
1335 | return 0; |
1336 | } | |
1337 | ||
2d17456e | 1338 | /* when dc->disk.kobj released */ |
cafe5635 KO |
1339 | void bch_cached_dev_release(struct kobject *kobj) |
1340 | { | |
1341 | struct cached_dev *dc = container_of(kobj, struct cached_dev, | |
1342 | disk.kobj); | |
1343 | kfree(dc); | |
1344 | module_put(THIS_MODULE); | |
1345 | } | |
1346 | ||
1347 | static void cached_dev_free(struct closure *cl) | |
1348 | { | |
1349 | struct cached_dev *dc = container_of(cl, struct cached_dev, disk.cl); | |
1350 | ||
3fd47bfe CL |
1351 | if (test_and_clear_bit(BCACHE_DEV_WB_RUNNING, &dc->disk.flags)) |
1352 | cancel_writeback_rate_update_dwork(dc); | |
1353 | ||
a664d0f0 SP |
1354 | if (!IS_ERR_OR_NULL(dc->writeback_thread)) |
1355 | kthread_stop(dc->writeback_thread); | |
0f0709e6 CL |
1356 | if (!IS_ERR_OR_NULL(dc->status_update_thread)) |
1357 | kthread_stop(dc->status_update_thread); | |
cafe5635 | 1358 | |
80265d8d CL |
1359 | mutex_lock(&bch_register_lock); |
1360 | ||
b75f4aed | 1361 | if (atomic_read(&dc->running)) { |
f59fce84 | 1362 | bd_unlink_disk_holder(dc->bdev, dc->disk.disk); |
b75f4aed CH |
1363 | del_gendisk(dc->disk.disk); |
1364 | } | |
cafe5635 KO |
1365 | bcache_device_free(&dc->disk); |
1366 | list_del(&dc->list); | |
1367 | ||
1368 | mutex_unlock(&bch_register_lock); | |
1369 | ||
475389ae CH |
1370 | if (dc->sb_disk) |
1371 | put_page(virt_to_page(dc->sb_disk)); | |
e8547d42 | 1372 | |
0781c874 | 1373 | if (!IS_ERR_OR_NULL(dc->bdev)) |
cafe5635 | 1374 | blkdev_put(dc->bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL); |
cafe5635 KO |
1375 | |
1376 | wake_up(&unregister_wait); | |
1377 | ||
1378 | kobject_put(&dc->disk.kobj); | |
1379 | } | |
1380 | ||
1381 | static void cached_dev_flush(struct closure *cl) | |
1382 | { | |
1383 | struct cached_dev *dc = container_of(cl, struct cached_dev, disk.cl); | |
1384 | struct bcache_device *d = &dc->disk; | |
1385 | ||
c9502ea4 | 1386 | mutex_lock(&bch_register_lock); |
c4d951dd | 1387 | bcache_device_unlink(d); |
c9502ea4 KO |
1388 | mutex_unlock(&bch_register_lock); |
1389 | ||
cafe5635 KO |
1390 | bch_cache_accounting_destroy(&dc->accounting); |
1391 | kobject_del(&d->kobj); | |
1392 | ||
1393 | continue_at(cl, cached_dev_free, system_wq); | |
1394 | } | |
1395 | ||
6f10f7d1 | 1396 | static int cached_dev_init(struct cached_dev *dc, unsigned int block_size) |
cafe5635 | 1397 | { |
f59fce84 | 1398 | int ret; |
cafe5635 | 1399 | struct io *io; |
f59fce84 | 1400 | struct request_queue *q = bdev_get_queue(dc->bdev); |
cafe5635 KO |
1401 | |
1402 | __module_get(THIS_MODULE); | |
1403 | INIT_LIST_HEAD(&dc->list); | |
f59fce84 KO |
1404 | closure_init(&dc->disk.cl, NULL); |
1405 | set_closure_fn(&dc->disk.cl, cached_dev_flush, system_wq); | |
cafe5635 | 1406 | kobject_init(&dc->disk.kobj, &bch_cached_dev_ktype); |
cafe5635 | 1407 | INIT_WORK(&dc->detach, cached_dev_detach_finish); |
cb7a583e | 1408 | sema_init(&dc->sb_write_mutex, 1); |
f59fce84 KO |
1409 | INIT_LIST_HEAD(&dc->io_lru); |
1410 | spin_lock_init(&dc->io_lock); | |
1411 | bch_cache_accounting_init(&dc->accounting, &dc->disk.cl); | |
cafe5635 | 1412 | |
cafe5635 KO |
1413 | dc->sequential_cutoff = 4 << 20; |
1414 | ||
cafe5635 KO |
1415 | for (io = dc->io; io < dc->io + RECENT_IO; io++) { |
1416 | list_add(&io->lru, &dc->io_lru); | |
1417 | hlist_add_head(&io->hash, dc->io_hash + RECENT_IO); | |
1418 | } | |
1419 | ||
c78afc62 KO |
1420 | dc->disk.stripe_size = q->limits.io_opt >> 9; |
1421 | ||
1422 | if (dc->disk.stripe_size) | |
1423 | dc->partial_stripes_expensive = | |
1424 | q->limits.raid_partial_stripes_expensive; | |
1425 | ||
279afbad | 1426 | ret = bcache_device_init(&dc->disk, block_size, |
a782483c | 1427 | bdev_nr_sectors(dc->bdev) - dc->sb.data_offset, |
c62b37d9 | 1428 | dc->bdev, &bcache_cached_ops); |
f59fce84 KO |
1429 | if (ret) |
1430 | return ret; | |
1431 | ||
5d4ce78b CH |
1432 | blk_queue_io_opt(dc->disk.disk->queue, |
1433 | max(queue_io_opt(dc->disk.disk->queue), queue_io_opt(q))); | |
f59fce84 | 1434 | |
c7b7bd07 CL |
1435 | atomic_set(&dc->io_errors, 0); |
1436 | dc->io_disable = false; | |
1437 | dc->error_limit = DEFAULT_CACHED_DEV_ERROR_LIMIT; | |
7e027ca4 CL |
1438 | /* default to auto */ |
1439 | dc->stop_when_cache_set_failed = BCH_CACHED_DEV_STOP_AUTO; | |
1440 | ||
f59fce84 KO |
1441 | bch_cached_dev_request_init(dc); |
1442 | bch_cached_dev_writeback_init(dc); | |
cafe5635 | 1443 | return 0; |
cafe5635 KO |
1444 | } |
1445 | ||
1446 | /* Cached device - bcache superblock */ | |
1447 | ||
cfa0c56d | 1448 | static int register_bdev(struct cache_sb *sb, struct cache_sb_disk *sb_disk, |
cafe5635 KO |
1449 | struct block_device *bdev, |
1450 | struct cached_dev *dc) | |
1451 | { | |
cafe5635 | 1452 | const char *err = "cannot allocate memory"; |
cafe5635 | 1453 | struct cache_set *c; |
0b13efec | 1454 | int ret = -ENOMEM; |
cafe5635 | 1455 | |
cafe5635 | 1456 | memcpy(&dc->sb, sb, sizeof(struct cache_sb)); |
cafe5635 KO |
1457 | dc->bdev = bdev; |
1458 | dc->bdev->bd_holder = dc; | |
475389ae | 1459 | dc->sb_disk = sb_disk; |
6e916a7e | 1460 | |
f59fce84 KO |
1461 | if (cached_dev_init(dc, sb->block_size << 9)) |
1462 | goto err; | |
cafe5635 KO |
1463 | |
1464 | err = "error creating kobject"; | |
8d65269f | 1465 | if (kobject_add(&dc->disk.kobj, bdev_kobj(bdev), "bcache")) |
cafe5635 KO |
1466 | goto err; |
1467 | if (bch_cache_accounting_add_kobjs(&dc->accounting, &dc->disk.kobj)) | |
1468 | goto err; | |
1469 | ||
0f5cd781 | 1470 | pr_info("registered backing device %pg\n", dc->bdev); |
f59fce84 | 1471 | |
cafe5635 | 1472 | list_add(&dc->list, &uncached_devices); |
e57fd746 | 1473 | /* attach to a matched cache set if it exists */ |
cafe5635 | 1474 | list_for_each_entry(c, &bch_cache_sets, list) |
73ac105b | 1475 | bch_cached_dev_attach(dc, c, NULL); |
cafe5635 KO |
1476 | |
1477 | if (BDEV_STATE(&dc->sb) == BDEV_STATE_NONE || | |
0b13efec CL |
1478 | BDEV_STATE(&dc->sb) == BDEV_STATE_STALE) { |
1479 | err = "failed to run cached device"; | |
1480 | ret = bch_cached_dev_run(dc); | |
1481 | if (ret) | |
1482 | goto err; | |
1483 | } | |
cafe5635 | 1484 | |
88c12d42 | 1485 | return 0; |
cafe5635 | 1486 | err: |
0f5cd781 | 1487 | pr_notice("error %pg: %s\n", dc->bdev, err); |
f59fce84 | 1488 | bcache_device_stop(&dc->disk); |
0b13efec | 1489 | return ret; |
cafe5635 KO |
1490 | } |
1491 | ||
1492 | /* Flash only volumes */ | |
1493 | ||
2d17456e | 1494 | /* When d->kobj released */ |
cafe5635 KO |
1495 | void bch_flash_dev_release(struct kobject *kobj) |
1496 | { | |
1497 | struct bcache_device *d = container_of(kobj, struct bcache_device, | |
1498 | kobj); | |
1499 | kfree(d); | |
1500 | } | |
1501 | ||
1502 | static void flash_dev_free(struct closure *cl) | |
1503 | { | |
1504 | struct bcache_device *d = container_of(cl, struct bcache_device, cl); | |
1fae7cf0 | 1505 | |
e5112201 | 1506 | mutex_lock(&bch_register_lock); |
99a27d59 TJ |
1507 | atomic_long_sub(bcache_dev_sectors_dirty(d), |
1508 | &d->c->flash_dev_dirty_sectors); | |
b75f4aed | 1509 | del_gendisk(d->disk); |
cafe5635 | 1510 | bcache_device_free(d); |
e5112201 | 1511 | mutex_unlock(&bch_register_lock); |
cafe5635 KO |
1512 | kobject_put(&d->kobj); |
1513 | } | |
1514 | ||
1515 | static void flash_dev_flush(struct closure *cl) | |
1516 | { | |
1517 | struct bcache_device *d = container_of(cl, struct bcache_device, cl); | |
1518 | ||
e5112201 | 1519 | mutex_lock(&bch_register_lock); |
ee668506 | 1520 | bcache_device_unlink(d); |
e5112201 | 1521 | mutex_unlock(&bch_register_lock); |
cafe5635 KO |
1522 | kobject_del(&d->kobj); |
1523 | continue_at(cl, flash_dev_free, system_wq); | |
1524 | } | |
1525 | ||
1526 | static int flash_dev_run(struct cache_set *c, struct uuid_entry *u) | |
1527 | { | |
2961c3bb | 1528 | int err = -ENOMEM; |
cafe5635 KO |
1529 | struct bcache_device *d = kzalloc(sizeof(struct bcache_device), |
1530 | GFP_KERNEL); | |
1531 | if (!d) | |
2961c3bb | 1532 | goto err_ret; |
cafe5635 KO |
1533 | |
1534 | closure_init(&d->cl, NULL); | |
1535 | set_closure_fn(&d->cl, flash_dev_flush, system_wq); | |
1536 | ||
1537 | kobject_init(&d->kobj, &bch_flash_dev_ktype); | |
1538 | ||
4e1ebae3 | 1539 | if (bcache_device_init(d, block_bytes(c->cache), u->sectors, |
c62b37d9 | 1540 | NULL, &bcache_flash_ops)) |
cafe5635 KO |
1541 | goto err; |
1542 | ||
1543 | bcache_device_attach(d, c, u - c->uuids); | |
175206cf | 1544 | bch_sectors_dirty_init(d); |
cafe5635 | 1545 | bch_flash_dev_request_init(d); |
2961c3bb LC |
1546 | err = add_disk(d->disk); |
1547 | if (err) | |
1548 | goto err; | |
cafe5635 | 1549 | |
2961c3bb LC |
1550 | err = kobject_add(&d->kobj, &disk_to_dev(d->disk)->kobj, "bcache"); |
1551 | if (err) | |
cafe5635 KO |
1552 | goto err; |
1553 | ||
1554 | bcache_device_link(d, c, "volume"); | |
1555 | ||
5342fd42 CL |
1556 | if (bch_has_feature_obso_large_bucket(&c->cache->sb)) { |
 1557 | 		pr_err("The obsolete large bucket layout is unsupported, setting the bcache device read-only\n"); |
 1558 | 		pr_err("Please update to the latest bcache-tools and re-create the cache device\n"); |
1559 | set_disk_ro(d->disk, 1); | |
1560 | } | |
1561 | ||
cafe5635 KO |
1562 | return 0; |
1563 | err: | |
1564 | kobject_put(&d->kobj); | |
2961c3bb LC |
1565 | err_ret: |
1566 | return err; | |
cafe5635 KO |
1567 | } |
1568 | ||
1569 | static int flash_devs_run(struct cache_set *c) | |
1570 | { | |
1571 | int ret = 0; | |
1572 | struct uuid_entry *u; | |
1573 | ||
1574 | for (u = c->uuids; | |
02aa8a8b | 1575 | u < c->uuids + c->nr_uuids && !ret; |
cafe5635 KO |
1576 | u++) |
1577 | if (UUID_FLASH_ONLY(u)) | |
1578 | ret = flash_dev_run(c, u); | |
1579 | ||
1580 | return ret; | |
1581 | } | |
1582 | ||
1583 | int bch_flash_dev_create(struct cache_set *c, uint64_t size) | |
1584 | { | |
1585 | struct uuid_entry *u; | |
1586 | ||
1587 | if (test_bit(CACHE_SET_STOPPING, &c->flags)) | |
1588 | return -EINTR; | |
1589 | ||
bf0c55c9 SP |
1590 | if (!test_bit(CACHE_SET_RUNNING, &c->flags)) |
1591 | return -EPERM; | |
1592 | ||
cafe5635 KO |
1593 | u = uuid_find_empty(c); |
1594 | if (!u) { | |
46f5aa88 | 1595 | pr_err("Can't create volume, no room for UUID\n"); |
cafe5635 KO |
1596 | return -EINVAL; |
1597 | } | |
1598 | ||
1599 | get_random_bytes(u->uuid, 16); | |
1600 | memset(u->label, 0, 32); | |
75cbb3f1 | 1601 | u->first_reg = u->last_reg = cpu_to_le32((u32)ktime_get_real_seconds()); |
cafe5635 KO |
1602 | |
1603 | SET_UUID_FLASH_ONLY(u, 1); | |
1604 | u->sectors = size >> 9; | |
1605 | ||
1606 | bch_uuid_write(c); | |
1607 | ||
1608 | return flash_dev_run(c, u); | |
1609 | } | |
1610 | ||
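/*
 * A usage sketch for bch_flash_dev_create(), assuming the cache set
 * exposes the usual flash_vol_create sysfs attribute:
 *
 *   echo 100G > /sys/fs/bcache/<cache-set-uuid>/flash_vol_create
 *
 * The written size becomes the @size argument above and is stored in
 * u->sectors as size >> 9.
 */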
c7b7bd07 CL |
1611 | bool bch_cached_dev_error(struct cached_dev *dc) |
1612 | { | |
c7b7bd07 CL |
1613 | if (!dc || test_bit(BCACHE_DEV_CLOSING, &dc->disk.flags)) |
1614 | return false; | |
1615 | ||
1616 | dc->io_disable = true; | |
 1617 | 	/* make sure others observe io_disable == true before the device is stopped */ |
1618 | smp_mb(); | |
1619 | ||
0f5cd781 CH |
1620 | pr_err("stop %s: too many IO errors on backing device %pg\n", |
1621 | dc->disk.disk->disk_name, dc->bdev); | |
c7b7bd07 CL |
1622 | |
1623 | bcache_device_stop(&dc->disk); | |
1624 | return true; | |
1625 | } | |
1626 | ||
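/*
 * A tuning sketch for the error path above, assuming the usual
 * per-device io_error_limit sysfs attribute (dc->error_limit defaults
 * to DEFAULT_CACHED_DEV_ERROR_LIMIT in cached_dev_init()):
 *
 *   echo 128 > /sys/block/bcache0/bcache/io_error_limit
 */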
cafe5635 KO |
1627 | /* Cache set */ |
1628 | ||
1629 | __printf(2, 3) | |
1630 | bool bch_cache_set_error(struct cache_set *c, const char *fmt, ...) | |
1631 | { | |
46f5aa88 | 1632 | struct va_format vaf; |
cafe5635 KO |
1633 | va_list args; |
1634 | ||
77c320eb KO |
1635 | if (c->on_error != ON_ERROR_PANIC && |
1636 | test_bit(CACHE_SET_STOPPING, &c->flags)) | |
cafe5635 KO |
1637 | return false; |
1638 | ||
771f393e | 1639 | if (test_and_set_bit(CACHE_SET_IO_DISABLE, &c->flags)) |
46f5aa88 | 1640 | pr_info("CACHE_SET_IO_DISABLE already set\n"); |
771f393e | 1641 | |
3be11dba CL |
1642 | /* |
1643 | * XXX: we can be called from atomic context | |
1644 | * acquire_console_sem(); | |
1645 | */ | |
cafe5635 | 1646 | |
cafe5635 | 1647 | va_start(args, fmt); |
cafe5635 | 1648 | |
46f5aa88 JP |
1649 | vaf.fmt = fmt; |
1650 | vaf.va = &args; | |
1651 | ||
1652 | pr_err("error on %pU: %pV, disabling caching\n", | |
1132e56e | 1653 | c->set_uuid, &vaf); |
46f5aa88 JP |
1654 | |
1655 | va_end(args); | |
cafe5635 | 1656 | |
77c320eb KO |
1657 | if (c->on_error == ON_ERROR_PANIC) |
1658 | panic("panic forced after error\n"); | |
1659 | ||
cafe5635 KO |
1660 | bch_cache_set_unregister(c); |
1661 | return true; | |
1662 | } | |
1663 | ||
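/*
 * A configuration sketch for bch_cache_set_error(), assuming the usual
 * cache-set "errors" sysfs attribute that selects c->on_error
 * ("unregister" or "panic"):
 *
 *   echo panic > /sys/fs/bcache/<cache-set-uuid>/errors
 */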
2d17456e | 1664 | /* When c->kobj released */ |
cafe5635 KO |
1665 | void bch_cache_set_release(struct kobject *kobj) |
1666 | { | |
1667 | struct cache_set *c = container_of(kobj, struct cache_set, kobj); | |
1fae7cf0 | 1668 | |
cafe5635 KO |
1669 | kfree(c); |
1670 | module_put(THIS_MODULE); | |
1671 | } | |
1672 | ||
1673 | static void cache_set_free(struct closure *cl) | |
1674 | { | |
1675 | struct cache_set *c = container_of(cl, struct cache_set, cl); | |
1676 | struct cache *ca; | |
cafe5635 | 1677 | |
ae171023 | 1678 | debugfs_remove(c->debug); |
cafe5635 KO |
1679 | |
1680 | bch_open_buckets_free(c); | |
1681 | bch_btree_cache_free(c); | |
1682 | bch_journal_free(c); | |
1683 | ||
a4b732a2 | 1684 | mutex_lock(&bch_register_lock); |
4a784266 CL |
1685 | bch_bset_sort_state_free(&c->sort); |
1686 | free_pages((unsigned long) c->uuids, ilog2(meta_bucket_pages(&c->cache->sb))); | |
1687 | ||
08fdb2cd CL |
1688 | ca = c->cache; |
1689 | if (ca) { | |
1690 | ca->set = NULL; | |
1691 | c->cache = NULL; | |
1692 | kobject_put(&ca->kobj); | |
1693 | } | |
cafe5635 | 1694 | |
cafe5635 | 1695 | |
da415a09 NS |
1696 | if (c->moving_gc_wq) |
1697 | destroy_workqueue(c->moving_gc_wq); | |
d19936a2 KO |
1698 | bioset_exit(&c->bio_split); |
1699 | mempool_exit(&c->fill_iter); | |
1700 | mempool_exit(&c->bio_meta); | |
1701 | mempool_exit(&c->search); | |
cafe5635 KO |
1702 | kfree(c->devices); |
1703 | ||
cafe5635 KO |
1704 | list_del(&c->list); |
1705 | mutex_unlock(&bch_register_lock); | |
1706 | ||
1132e56e | 1707 | pr_info("Cache set %pU unregistered\n", c->set_uuid); |
cafe5635 KO |
1708 | wake_up(&unregister_wait); |
1709 | ||
1710 | closure_debug_destroy(&c->cl); | |
1711 | kobject_put(&c->kobj); | |
1712 | } | |
1713 | ||
1714 | static void cache_set_flush(struct closure *cl) | |
1715 | { | |
1716 | struct cache_set *c = container_of(cl, struct cache_set, caching); | |
08fdb2cd | 1717 | struct cache *ca = c->cache; |
cafe5635 | 1718 | struct btree *b; |
cafe5635 KO |
1719 | |
1720 | bch_cache_accounting_destroy(&c->accounting); | |
1721 | ||
1722 | kobject_put(&c->internal); | |
1723 | kobject_del(&c->kobj); | |
1724 | ||
b387e9b5 | 1725 | if (!IS_ERR_OR_NULL(c->gc_thread)) |
72a44517 KO |
1726 | kthread_stop(c->gc_thread); |
1727 | ||
cafe5635 KO |
1728 | if (!IS_ERR_OR_NULL(c->root)) |
1729 | list_add(&c->root->list, &c->btree_cache); | |
1730 | ||
e6dcbd3e CL |
1731 | /* |
1732 | * Avoid flushing cached nodes if cache set is retiring | |
1733 | * due to too many I/O errors detected. | |
1734 | */ | |
1735 | if (!test_bit(CACHE_SET_IO_DISABLE, &c->flags)) | |
1736 | list_for_each_entry(b, &c->btree_cache, list) { | |
1737 | mutex_lock(&b->write_lock); | |
1738 | if (btree_node_dirty(b)) | |
1739 | __bch_btree_node_write(b, NULL); | |
1740 | mutex_unlock(&b->write_lock); | |
1741 | } | |
cafe5635 | 1742 | |
08fdb2cd CL |
1743 | if (ca->alloc_thread) |
1744 | kthread_stop(ca->alloc_thread); | |
79826c35 | 1745 | |
5b1016e6 KO |
1746 | if (c->journal.cur) { |
1747 | cancel_delayed_work_sync(&c->journal.work); | |
1748 | /* flush last journal entry if needed */ | |
1749 | c->journal.work.work.func(&c->journal.work.work); | |
1750 | } | |
dabb4433 | 1751 | |
cafe5635 KO |
1752 | closure_return(cl); |
1753 | } | |
1754 | ||
7e027ca4 CL |
1755 | /* |
1756 | * This function is only called when CACHE_SET_IO_DISABLE is set, which means | |
1757 | * cache set is unregistering due to too many I/O errors. In this condition, | |
 1758 | * whether the bcache device is stopped depends on the stop_when_cache_set_failed |
 1759 | * value and on whether the broken cache has dirty data: |
1760 | * | |
1761 | * dc->stop_when_cache_set_failed dc->has_dirty stop bcache device | |
 1762 | * BCH_CACHED_DEV_STOP_AUTO 0 NO |
 1763 | * BCH_CACHED_DEV_STOP_AUTO 1 YES |
1764 | * BCH_CACHED_DEV_STOP_ALWAYS 0 YES | |
1765 | * BCH_CACHED_DEV_STOP_ALWAYS 1 YES | |
1766 | * | |
 1767 | * The expected behavior is: if stop_when_cache_set_failed is configured to |
 1768 | * "auto" via the sysfs interface, the bcache device is not stopped when the |
 1769 | * backing device holds no dirty data from the broken cache set (see the sketch after this function). |
1770 | */ | |
1771 | static void conditional_stop_bcache_device(struct cache_set *c, | |
1772 | struct bcache_device *d, | |
1773 | struct cached_dev *dc) | |
1774 | { | |
1775 | if (dc->stop_when_cache_set_failed == BCH_CACHED_DEV_STOP_ALWAYS) { | |
46f5aa88 | 1776 | pr_warn("stop_when_cache_set_failed of %s is \"always\", stop it for failed cache set %pU.\n", |
1132e56e | 1777 | d->disk->disk_name, c->set_uuid); |
7e027ca4 CL |
1778 | bcache_device_stop(d); |
1779 | } else if (atomic_read(&dc->has_dirty)) { | |
1780 | /* | |
 1781 | * dc->stop_when_cache_set_failed == BCH_CACHED_DEV_STOP_AUTO |
1782 | * and dc->has_dirty == 1 | |
1783 | */ | |
46f5aa88 | 1784 | pr_warn("stop_when_cache_set_failed of %s is \"auto\" and cache is dirty, stop it to avoid potential data corruption.\n", |
7e027ca4 | 1785 | d->disk->disk_name); |
e8cf978d CIK |
1786 | /* |
 1787 | * There might be a small window in which the cache |
 1788 | * set is already released but the bcache device is |
 1789 | * not. During this window, regular I/O requests go |
 1790 | * directly to the backing device because no cache |
 1791 | * set is attached. In writeback mode with dirty |
 1792 | * data, this can leave the data inconsistent. |
 1793 | * Therefore, before calling bcache_device_stop() |
 1794 | * due to a broken cache device, dc->io_disable |
 1795 | * should be explicitly set to true. |
1796 | */ | |
1797 | dc->io_disable = true; | |
 1798 | /* make sure others observe io_disable == true before the device is stopped */ |
1799 | smp_mb(); | |
1800 | bcache_device_stop(d); | |
7e027ca4 CL |
1801 | } else { |
1802 | /* | |
 1803 | * dc->stop_when_cache_set_failed == BCH_CACHED_DEV_STOP_AUTO |
1804 | * and dc->has_dirty == 0 | |
1805 | */ | |
46f5aa88 | 1806 | pr_warn("stop_when_cache_set_failed of %s is \"auto\" and cache is clean, keep it alive.\n", |
7e027ca4 CL |
1807 | d->disk->disk_name); |
1808 | } | |
1809 | } | |
1810 | ||
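/*
 * The usage sketch referred to above, assuming the usual per-device
 * stop_when_cache_set_failed sysfs attribute ("auto" or "always"):
 *
 *   echo always > /sys/block/bcache0/bcache/stop_when_cache_set_failed
 */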
cafe5635 KO |
1811 | static void __cache_set_unregister(struct closure *cl) |
1812 | { | |
1813 | struct cache_set *c = container_of(cl, struct cache_set, caching); | |
5caa52af | 1814 | struct cached_dev *dc; |
7e027ca4 | 1815 | struct bcache_device *d; |
cafe5635 KO |
1816 | size_t i; |
1817 | ||
1818 | mutex_lock(&bch_register_lock); | |
1819 | ||
7e027ca4 CL |
1820 | for (i = 0; i < c->devices_max_used; i++) { |
1821 | d = c->devices[i]; | |
1822 | if (!d) | |
1823 | continue; | |
1824 | ||
1825 | if (!UUID_FLASH_ONLY(&c->uuids[i]) && | |
1826 | test_bit(CACHE_SET_UNREGISTERING, &c->flags)) { | |
1827 | dc = container_of(d, struct cached_dev, disk); | |
1828 | bch_cached_dev_detach(dc); | |
1829 | if (test_bit(CACHE_SET_IO_DISABLE, &c->flags)) | |
1830 | conditional_stop_bcache_device(c, d, dc); | |
1831 | } else { | |
1832 | bcache_device_stop(d); | |
5caa52af | 1833 | } |
7e027ca4 | 1834 | } |
cafe5635 KO |
1835 | |
1836 | mutex_unlock(&bch_register_lock); | |
1837 | ||
1838 | continue_at(cl, cache_set_flush, system_wq); | |
1839 | } | |
1840 | ||
1841 | void bch_cache_set_stop(struct cache_set *c) | |
1842 | { | |
1843 | if (!test_and_set_bit(CACHE_SET_STOPPING, &c->flags)) | |
63d63b51 | 1844 | /* closure_fn set to __cache_set_unregister() */ |
cafe5635 KO |
1845 | closure_queue(&c->caching); |
1846 | } | |
1847 | ||
1848 | void bch_cache_set_unregister(struct cache_set *c) | |
1849 | { | |
1850 | set_bit(CACHE_SET_UNREGISTERING, &c->flags); | |
1851 | bch_cache_set_stop(c); | |
1852 | } | |
1853 | ||
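/*
 * alloc_meta_bucket_pages() below returns zeroed, physically contiguous
 * compound pages large enough to hold one metadata bucket (see
 * meta_bucket_pages()); they are freed with free_pages() of the same
 * order, as done in cache_set_free() and bch_cache_release().
 */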
de1fafab CL |
1854 | #define alloc_meta_bucket_pages(gfp, sb) \ |
1855 | ((void *) __get_free_pages(__GFP_ZERO|__GFP_COMP|gfp, ilog2(meta_bucket_pages(sb)))) | |
cafe5635 KO |
1856 | |
1857 | struct cache_set *bch_cache_set_alloc(struct cache_sb *sb) | |
1858 | { | |
1859 | int iter_size; | |
4a784266 | 1860 | struct cache *ca = container_of(sb, struct cache, sb); |
cafe5635 | 1861 | struct cache_set *c = kzalloc(sizeof(struct cache_set), GFP_KERNEL); |
1fae7cf0 | 1862 | |
cafe5635 KO |
1863 | if (!c) |
1864 | return NULL; | |
1865 | ||
1866 | __module_get(THIS_MODULE); | |
1867 | closure_init(&c->cl, NULL); | |
1868 | set_closure_fn(&c->cl, cache_set_free, system_wq); | |
1869 | ||
1870 | closure_init(&c->caching, &c->cl); | |
1871 | set_closure_fn(&c->caching, __cache_set_unregister, system_wq); | |
1872 | ||
1873 | /* Maybe create continue_at_noreturn() and use it here? */ | |
1874 | closure_set_stopped(&c->cl); | |
1875 | closure_put(&c->cl); | |
1876 | ||
1877 | kobject_init(&c->kobj, &bch_cache_set_ktype); | |
1878 | kobject_init(&c->internal, &bch_cache_set_internal_ktype); | |
1879 | ||
1880 | bch_cache_accounting_init(&c->accounting, &c->cl); | |
1881 | ||
1132e56e | 1882 | memcpy(c->set_uuid, sb->set_uuid, 16); |
d721a43f | 1883 | |
4a784266 CL |
1884 | c->cache = ca; |
1885 | c->cache->set = c; | |
cafe5635 KO |
1886 | c->bucket_bits = ilog2(sb->bucket_size); |
1887 | c->block_bits = ilog2(sb->block_size); | |
4a784266 | 1888 | c->nr_uuids = meta_bucket_bytes(sb) / sizeof(struct uuid_entry); |
2831231d | 1889 | c->devices_max_used = 0; |
ea8c5356 | 1890 | atomic_set(&c->attached_dev_nr, 0); |
4a784266 | 1891 | c->btree_pages = meta_bucket_pages(sb); |
cafe5635 KO |
1892 | if (c->btree_pages > BTREE_MAX_PAGES) |
1893 | c->btree_pages = max_t(int, c->btree_pages / 4, | |
1894 | BTREE_MAX_PAGES); | |
1895 | ||
cb7a583e | 1896 | sema_init(&c->sb_write_mutex, 1); |
e8e1d468 | 1897 | mutex_init(&c->bucket_lock); |
0a63b66d | 1898 | init_waitqueue_head(&c->btree_cache_wait); |
34cf78bf | 1899 | spin_lock_init(&c->btree_cannibalize_lock); |
35fcd848 | 1900 | init_waitqueue_head(&c->bucket_wait); |
be628be0 | 1901 | init_waitqueue_head(&c->gc_wait); |
cb7a583e | 1902 | sema_init(&c->uuid_write_mutex, 1); |
65d22e91 | 1903 | |
65d22e91 KO |
1904 | spin_lock_init(&c->btree_gc_time.lock); |
1905 | spin_lock_init(&c->btree_split_time.lock); | |
1906 | spin_lock_init(&c->btree_read_time.lock); | |
e8e1d468 | 1907 | |
cafe5635 KO |
1908 | bch_moving_init_cache_set(c); |
1909 | ||
1910 | INIT_LIST_HEAD(&c->list); | |
1911 | INIT_LIST_HEAD(&c->cached_devs); | |
1912 | INIT_LIST_HEAD(&c->btree_cache); | |
1913 | INIT_LIST_HEAD(&c->btree_cache_freeable); | |
1914 | INIT_LIST_HEAD(&c->btree_cache_freed); | |
1915 | INIT_LIST_HEAD(&c->data_buckets); | |
1916 | ||
6907dc49 | 1917 | iter_size = ((meta_bucket_pages(sb) * PAGE_SECTORS) / sb->block_size + 1) * |
cafe5635 KO |
1918 | sizeof(struct btree_iter_set); |
1919 | ||
a42d3c64 CL |
1920 | c->devices = kcalloc(c->nr_uuids, sizeof(void *), GFP_KERNEL); |
1921 | if (!c->devices) | |
1922 | goto err; | |
1923 | ||
1924 | if (mempool_init_slab_pool(&c->search, 32, bch_search_cache)) | |
1925 | goto err; | |
1926 | ||
1927 | if (mempool_init_kmalloc_pool(&c->bio_meta, 2, | |
1928 | sizeof(struct bbio) + | |
4a784266 | 1929 | sizeof(struct bio_vec) * meta_bucket_pages(sb))) |
a42d3c64 CL |
1930 | goto err; |
1931 | ||
1932 | if (mempool_init_kmalloc_pool(&c->fill_iter, 1, iter_size)) | |
1933 | goto err; | |
1934 | ||
1935 | if (bioset_init(&c->bio_split, 4, offsetof(struct bbio, bio), | |
faa8e2c4 | 1936 | BIOSET_NEED_RESCUER)) |
a42d3c64 CL |
1937 | goto err; |
1938 | ||
4a784266 | 1939 | c->uuids = alloc_meta_bucket_pages(GFP_KERNEL, sb); |
a42d3c64 CL |
1940 | if (!c->uuids) |
1941 | goto err; | |
1942 | ||
1943 | c->moving_gc_wq = alloc_workqueue("bcache_gc", WQ_MEM_RECLAIM, 0); | |
1944 | if (!c->moving_gc_wq) | |
1945 | goto err; | |
1946 | ||
1947 | if (bch_journal_alloc(c)) | |
1948 | goto err; | |
1949 | ||
1950 | if (bch_btree_cache_alloc(c)) | |
1951 | goto err; | |
1952 | ||
1953 | if (bch_open_buckets_alloc(c)) | |
1954 | goto err; | |
1955 | ||
1956 | if (bch_bset_sort_state_init(&c->sort, ilog2(c->btree_pages))) | |
cafe5635 KO |
1957 | goto err; |
1958 | ||
cafe5635 KO |
1959 | c->congested_read_threshold_us = 2000; |
1960 | c->congested_write_threshold_us = 20000; | |
7ba0d830 | 1961 | c->error_limit = DEFAULT_IO_ERROR_LIMIT; |
c5fcdedc | 1962 | c->idle_max_writeback_rate_enabled = 1; |
771f393e | 1963 | WARN_ON(test_and_clear_bit(CACHE_SET_IO_DISABLE, &c->flags)); |
cafe5635 KO |
1964 | |
1965 | return c; | |
1966 | err: | |
1967 | bch_cache_set_unregister(c); | |
1968 | return NULL; | |
1969 | } | |
1970 | ||
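/*
 * run_cache_set() below brings a cache set online.  If CACHE_SYNC is
 * set in the superblock, the existing journal is read and replayed and
 * the btree root is loaded from it; otherwise the cache is treated as
 * new: existing data is invalidated and fresh priorities, UUIDs and a
 * btree root are written before journalling is enabled via
 * SET_CACHE_SYNC().
 */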
ce3e4cfb | 1971 | static int run_cache_set(struct cache_set *c) |
cafe5635 KO |
1972 | { |
1973 | const char *err = "cannot allocate memory"; | |
1974 | struct cached_dev *dc, *t; | |
08fdb2cd | 1975 | struct cache *ca = c->cache; |
c18536a7 | 1976 | struct closure cl; |
95f18c9d SW |
1977 | LIST_HEAD(journal); |
1978 | struct journal_replay *l; | |
cafe5635 | 1979 | |
c18536a7 | 1980 | closure_init_stack(&cl); |
cafe5635 | 1981 | |
08fdb2cd | 1982 | c->nbuckets = ca->sb.nbuckets; |
be628be0 | 1983 | set_gc_sectors(c); |
cafe5635 | 1984 | |
6f9414e0 | 1985 | if (CACHE_SYNC(&c->cache->sb)) { |
cafe5635 KO |
1986 | struct bkey *k; |
1987 | struct jset *j; | |
1988 | ||
1989 | err = "cannot allocate memory for journal"; | |
c18536a7 | 1990 | if (bch_journal_read(c, &journal)) |
cafe5635 KO |
1991 | goto err; |
1992 | ||
46f5aa88 | 1993 | pr_debug("bch_journal_read() done\n"); |
cafe5635 KO |
1994 | |
1995 | err = "no journal entries found"; | |
1996 | if (list_empty(&journal)) | |
1997 | goto err; | |
1998 | ||
1999 | j = &list_entry(journal.prev, struct journal_replay, list)->j; | |
2000 | ||
2001 | err = "IO error reading priorities"; | |
08fdb2cd CL |
2002 | if (prio_read(ca, j->prio_bucket[ca->sb.nr_this_dev])) |
2003 | goto err; | |
cafe5635 KO |
2004 | |
2005 | /* | |
2006 | * If prio_read() fails it'll call cache_set_error and we'll | |
 2007 | * tear everything down right away; if we checked sooner we |
 2008 | * could perhaps avoid journal replay. |
2009 | */ | |
2010 | ||
2011 | k = &j->btree_root; | |
2012 | ||
2013 | err = "bad btree root"; | |
65d45231 | 2014 | if (__bch_btree_ptr_invalid(c, k)) |
cafe5635 KO |
2015 | goto err; |
2016 | ||
2017 | err = "error reading btree root"; | |
b0d30981 CL |
2018 | c->root = bch_btree_node_get(c, NULL, k, |
2019 | j->btree_level, | |
2020 | true, NULL); | |
cafe5635 KO |
2021 | if (IS_ERR_OR_NULL(c->root)) |
2022 | goto err; | |
2023 | ||
2024 | list_del_init(&c->root->list); | |
2025 | rw_unlock(true, c->root); | |
2026 | ||
c18536a7 | 2027 | err = uuid_read(c, j, &cl); |
cafe5635 KO |
2028 | if (err) |
2029 | goto err; | |
2030 | ||
2031 | err = "error in recovery"; | |
c18536a7 | 2032 | if (bch_btree_check(c)) |
cafe5635 KO |
2033 | goto err; |
2034 | ||
2035 | bch_journal_mark(c, &journal); | |
2531d9ee | 2036 | bch_initial_gc_finish(c); |
46f5aa88 | 2037 | pr_debug("btree_check() done\n"); |
cafe5635 KO |
2038 | |
2039 | /* | |
2040 | * bcache_journal_next() can't happen sooner, or | |
2041 | * btree_gc_finish() will give spurious errors about last_gc > | |
2042 | * gc_gen - this is a hack but oh well. | |
2043 | */ | |
2044 | bch_journal_next(&c->journal); | |
2045 | ||
119ba0f8 | 2046 | err = "error starting allocator thread"; |
08fdb2cd CL |
2047 | if (bch_cache_allocator_start(ca)) |
2048 | goto err; | |
cafe5635 KO |
2049 | |
2050 | /* | |
2051 | * First place it's safe to allocate: btree_check() and | |
2052 | * btree_gc_finish() have to run before we have buckets to | |
2053 | * allocate, and bch_bucket_alloc_set() might cause a journal | |
2054 | * entry to be written so bcache_journal_next() has to be called | |
2055 | * first. | |
2056 | * | |
2057 | * If the uuids were in the old format we have to rewrite them | |
2058 | * before the next journal entry is written: | |
2059 | */ | |
2060 | if (j->version < BCACHE_JSET_VERSION_UUID) | |
2061 | __uuid_write(c); | |
2062 | ||
ce3e4cfb CL |
2063 | err = "bcache: replay journal failed"; |
2064 | if (bch_journal_replay(c, &journal)) | |
2065 | goto err; | |
cafe5635 | 2066 | } else { |
08fdb2cd | 2067 | unsigned int j; |
cafe5635 | 2068 | |
08fdb2cd CL |
2069 | pr_notice("invalidating existing data\n"); |
2070 | ca->sb.keys = clamp_t(int, ca->sb.nbuckets >> 7, | |
2071 | 2, SB_JOURNAL_BUCKETS); | |
cafe5635 | 2072 | |
08fdb2cd CL |
2073 | for (j = 0; j < ca->sb.keys; j++) |
2074 | ca->sb.d[j] = ca->sb.first_bucket + j; | |
cafe5635 | 2075 | |
2531d9ee | 2076 | bch_initial_gc_finish(c); |
cafe5635 | 2077 | |
119ba0f8 | 2078 | err = "error starting allocator thread"; |
08fdb2cd CL |
2079 | if (bch_cache_allocator_start(ca)) |
2080 | goto err; | |
cafe5635 KO |
2081 | |
2082 | mutex_lock(&c->bucket_lock); | |
08fdb2cd | 2083 | bch_prio_write(ca, true); |
cafe5635 KO |
2084 | mutex_unlock(&c->bucket_lock); |
2085 | ||
cafe5635 KO |
2086 | err = "cannot allocate new UUID bucket"; |
2087 | if (__uuid_write(c)) | |
72a44517 | 2088 | goto err; |
cafe5635 KO |
2089 | |
2090 | err = "cannot allocate new btree root"; | |
2452cc89 | 2091 | c->root = __bch_btree_node_alloc(c, NULL, 0, true, NULL); |
cafe5635 | 2092 | if (IS_ERR_OR_NULL(c->root)) |
72a44517 | 2093 | goto err; |
cafe5635 | 2094 | |
2a285686 | 2095 | mutex_lock(&c->root->write_lock); |
cafe5635 | 2096 | bkey_copy_key(&c->root->key, &MAX_KEY); |
c18536a7 | 2097 | bch_btree_node_write(c->root, &cl); |
2a285686 | 2098 | mutex_unlock(&c->root->write_lock); |
cafe5635 KO |
2099 | |
2100 | bch_btree_set_root(c->root); | |
2101 | rw_unlock(true, c->root); | |
2102 | ||
2103 | /* | |
2104 | * We don't want to write the first journal entry until | |
2105 | * everything is set up - fortunately journal entries won't be | |
2106 | * written until the SET_CACHE_SYNC() here: | |
2107 | */ | |
6f9414e0 | 2108 | SET_CACHE_SYNC(&c->cache->sb, true); |
cafe5635 KO |
2109 | |
2110 | bch_journal_next(&c->journal); | |
c18536a7 | 2111 | bch_journal_meta(c, &cl); |
cafe5635 KO |
2112 | } |
2113 | ||
72a44517 KO |
2114 | err = "error starting gc thread"; |
2115 | if (bch_gc_thread_start(c)) | |
2116 | goto err; | |
2117 | ||
c18536a7 | 2118 | closure_sync(&cl); |
4a784266 | 2119 | c->cache->sb.last_mount = (u32)ktime_get_real_seconds(); |
cafe5635 KO |
2120 | bcache_write_super(c); |
2121 | ||
5342fd42 CL |
2122 | if (bch_has_feature_obso_large_bucket(&c->cache->sb)) |
 2123 | pr_err("Detected obsolete large bucket layout, all attached bcache devices will be read-only\n"); |
2124 | ||
cafe5635 | 2125 | list_for_each_entry_safe(dc, t, &uncached_devices, list) |
73ac105b | 2126 | bch_cached_dev_attach(dc, c, NULL); |
cafe5635 KO |
2127 | |
2128 | flash_devs_run(c); | |
2129 | ||
32feee36 | 2130 | bch_journal_space_reserve(&c->journal); |
bf0c55c9 | 2131 | set_bit(CACHE_SET_RUNNING, &c->flags); |
ce3e4cfb | 2132 | return 0; |
cafe5635 | 2133 | err: |
95f18c9d SW |
2134 | while (!list_empty(&journal)) { |
2135 | l = list_first_entry(&journal, struct journal_replay, list); | |
2136 | list_del(&l->list); | |
2137 | kfree(l); | |
2138 | } | |
2139 | ||
c18536a7 | 2140 | closure_sync(&cl); |
68a53c95 | 2141 | |
c8694948 | 2142 | bch_cache_set_error(c, "%s", err); |
ce3e4cfb CL |
2143 | |
2144 | return -EIO; | |
cafe5635 KO |
2145 | } |
2146 | ||
cafe5635 KO |
2147 | static const char *register_cache_set(struct cache *ca) |
2148 | { | |
2149 | char buf[12]; | |
2150 | const char *err = "cannot allocate memory"; | |
2151 | struct cache_set *c; | |
2152 | ||
2153 | list_for_each_entry(c, &bch_cache_sets, list) | |
1132e56e | 2154 | if (!memcmp(c->set_uuid, ca->sb.set_uuid, 16)) { |
697e2349 | 2155 | if (c->cache) |
cafe5635 KO |
2156 | return "duplicate cache set member"; |
2157 | ||
cafe5635 KO |
2158 | goto found; |
2159 | } | |
2160 | ||
2161 | c = bch_cache_set_alloc(&ca->sb); | |
2162 | if (!c) | |
2163 | return err; | |
2164 | ||
2165 | err = "error creating kobject"; | |
1132e56e | 2166 | if (kobject_add(&c->kobj, bcache_kobj, "%pU", c->set_uuid) || |
cafe5635 KO |
2167 | kobject_add(&c->internal, &c->kobj, "internal")) |
2168 | goto err; | |
2169 | ||
2170 | if (bch_cache_accounting_add_kobjs(&c->accounting, &c->kobj)) | |
2171 | goto err; | |
2172 | ||
2173 | bch_debug_init_cache_set(c); | |
2174 | ||
2175 | list_add(&c->list, &bch_cache_sets); | |
2176 | found: | |
2177 | sprintf(buf, "cache%i", ca->sb.nr_this_dev); | |
2178 | if (sysfs_create_link(&ca->kobj, &c->kobj, "set") || | |
2179 | sysfs_create_link(&c->kobj, &ca->kobj, buf)) | |
2180 | goto err; | |
2181 | ||
d83353b3 | 2182 | kobject_get(&ca->kobj); |
cafe5635 | 2183 | ca->set = c; |
697e2349 | 2184 | ca->set->cache = ca; |
cafe5635 | 2185 | |
697e2349 CL |
2186 | err = "failed to run cache set"; |
2187 | if (run_cache_set(c) < 0) | |
2188 | goto err; | |
cafe5635 KO |
2189 | |
2190 | return NULL; | |
2191 | err: | |
2192 | bch_cache_set_unregister(c); | |
2193 | return err; | |
2194 | } | |
2195 | ||
2196 | /* Cache device */ | |
2197 | ||
2d17456e | 2198 | /* When ca->kobj released */ |
cafe5635 KO |
2199 | void bch_cache_release(struct kobject *kobj) |
2200 | { | |
2201 | struct cache *ca = container_of(kobj, struct cache, kobj); | |
6f10f7d1 | 2202 | unsigned int i; |
cafe5635 | 2203 | |
c9a78332 | 2204 | if (ca->set) { |
697e2349 CL |
2205 | BUG_ON(ca->set->cache != ca); |
2206 | ca->set->cache = NULL; | |
c9a78332 | 2207 | } |
cafe5635 | 2208 | |
c954ac8d | 2209 | free_pages((unsigned long) ca->disk_buckets, ilog2(meta_bucket_pages(&ca->sb))); |
cafe5635 KO |
2210 | kfree(ca->prio_buckets); |
2211 | vfree(ca->buckets); | |
2212 | ||
2213 | free_heap(&ca->heap); | |
cafe5635 | 2214 | free_fifo(&ca->free_inc); |
78365411 KO |
2215 | |
2216 | for (i = 0; i < RESERVE_NR; i++) | |
2217 | free_fifo(&ca->free[i]); | |
cafe5635 | 2218 | |
475389ae CH |
2219 | if (ca->sb_disk) |
2220 | put_page(virt_to_page(ca->sb_disk)); | |
cafe5635 | 2221 | |
0781c874 | 2222 | if (!IS_ERR_OR_NULL(ca->bdev)) |
cafe5635 | 2223 | blkdev_put(ca->bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL); |
cafe5635 KO |
2224 | |
2225 | kfree(ca); | |
2226 | module_put(THIS_MODULE); | |
2227 | } | |
2228 | ||
c50d4d5d | 2229 | static int cache_alloc(struct cache *ca) |
cafe5635 KO |
2230 | { |
2231 | size_t free; | |
682811b3 | 2232 | size_t btree_buckets; |
cafe5635 | 2233 | struct bucket *b; |
f6027bca DC |
2234 | int ret = -ENOMEM; |
2235 | const char *err = NULL; | |
cafe5635 | 2236 | |
cafe5635 KO |
2237 | __module_get(THIS_MODULE); |
2238 | kobject_init(&ca->kobj, &bch_cache_ktype); | |
2239 | ||
49add496 | 2240 | bio_init(&ca->journal.bio, NULL, ca->journal.bio.bi_inline_vecs, 8, 0); |
cafe5635 | 2241 | |
682811b3 TJ |
2242 | /* |
 2243 | * When ca->sb.njournal_buckets is not zero, a journal exists, |
 2244 | * and btree nodes may split during bch_journal_replay(), so |
 2245 | * buckets of type RESERVE_BTREE are needed. In the worst case |
 2246 | * every journal bucket holds valid journal entries and all of |
 2247 | * the keys need to be replayed, so the number of RESERVE_BTREE |
 2248 | * buckets should be as large as the number of journal buckets. |
2250 | */ | |
2251 | btree_buckets = ca->sb.njournal_buckets ?: 8; | |
78365411 | 2252 | free = roundup_pow_of_two(ca->sb.nbuckets) >> 10; |
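	/*
	 * roughly 1/1024 of the bucket count; used below to size the
	 * movinggc/none reserves (free), free_inc (free << 2) and the
	 * heap (free << 3)
	 */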
3a646fd7 DC |
2253 | if (!free) { |
2254 | ret = -EPERM; | |
2255 | err = "ca->sb.nbuckets is too small"; | |
2256 | goto err_free; | |
2257 | } | |
cafe5635 | 2258 | |
f6027bca DC |
2259 | if (!init_fifo(&ca->free[RESERVE_BTREE], btree_buckets, |
2260 | GFP_KERNEL)) { | |
2261 | err = "ca->free[RESERVE_BTREE] alloc failed"; | |
2262 | goto err_btree_alloc; | |
2263 | } | |
2264 | ||
2265 | if (!init_fifo_exact(&ca->free[RESERVE_PRIO], prio_buckets(ca), | |
2266 | GFP_KERNEL)) { | |
2267 | err = "ca->free[RESERVE_PRIO] alloc failed"; | |
2268 | goto err_prio_alloc; | |
2269 | } | |
2270 | ||
2271 | if (!init_fifo(&ca->free[RESERVE_MOVINGGC], free, GFP_KERNEL)) { | |
2272 | err = "ca->free[RESERVE_MOVINGGC] alloc failed"; | |
2273 | goto err_movinggc_alloc; | |
2274 | } | |
2275 | ||
2276 | if (!init_fifo(&ca->free[RESERVE_NONE], free, GFP_KERNEL)) { | |
2277 | err = "ca->free[RESERVE_NONE] alloc failed"; | |
2278 | goto err_none_alloc; | |
2279 | } | |
2280 | ||
2281 | if (!init_fifo(&ca->free_inc, free << 2, GFP_KERNEL)) { | |
2282 | err = "ca->free_inc alloc failed"; | |
2283 | goto err_free_inc_alloc; | |
2284 | } | |
2285 | ||
2286 | if (!init_heap(&ca->heap, free << 3, GFP_KERNEL)) { | |
2287 | err = "ca->heap alloc failed"; | |
2288 | goto err_heap_alloc; | |
2289 | } | |
2290 | ||
2291 | ca->buckets = vzalloc(array_size(sizeof(struct bucket), | |
2292 | ca->sb.nbuckets)); | |
2293 | if (!ca->buckets) { | |
2294 | err = "ca->buckets alloc failed"; | |
2295 | goto err_buckets_alloc; | |
2296 | } | |
2297 | ||
2298 | ca->prio_buckets = kzalloc(array3_size(sizeof(uint64_t), | |
2299 | prio_buckets(ca), 2), | |
2300 | GFP_KERNEL); | |
2301 | if (!ca->prio_buckets) { | |
2302 | err = "ca->prio_buckets alloc failed"; | |
2303 | goto err_prio_buckets_alloc; | |
2304 | } | |
2305 | ||
c954ac8d | 2306 | ca->disk_buckets = alloc_meta_bucket_pages(GFP_KERNEL, &ca->sb); |
f6027bca DC |
2307 | if (!ca->disk_buckets) { |
2308 | err = "ca->disk_buckets alloc failed"; | |
2309 | goto err_disk_buckets_alloc; | |
2310 | } | |
cafe5635 KO |
2311 | |
2312 | ca->prio_last_buckets = ca->prio_buckets + prio_buckets(ca); | |
2313 | ||
cafe5635 KO |
2314 | for_each_bucket(b, ca) |
2315 | atomic_set(&b->pin, 0); | |
cafe5635 | 2316 | return 0; |
f6027bca DC |
2317 | |
2318 | err_disk_buckets_alloc: | |
2319 | kfree(ca->prio_buckets); | |
2320 | err_prio_buckets_alloc: | |
2321 | vfree(ca->buckets); | |
2322 | err_buckets_alloc: | |
2323 | free_heap(&ca->heap); | |
2324 | err_heap_alloc: | |
2325 | free_fifo(&ca->free_inc); | |
2326 | err_free_inc_alloc: | |
2327 | free_fifo(&ca->free[RESERVE_NONE]); | |
2328 | err_none_alloc: | |
2329 | free_fifo(&ca->free[RESERVE_MOVINGGC]); | |
2330 | err_movinggc_alloc: | |
2331 | free_fifo(&ca->free[RESERVE_PRIO]); | |
2332 | err_prio_alloc: | |
2333 | free_fifo(&ca->free[RESERVE_BTREE]); | |
2334 | err_btree_alloc: | |
3a646fd7 | 2335 | err_free: |
f6027bca DC |
2336 | module_put(THIS_MODULE); |
2337 | if (err) | |
7e84c215 | 2338 | pr_notice("error %pg: %s\n", ca->bdev, err); |
f6027bca | 2339 | return ret; |
cafe5635 KO |
2340 | } |
2341 | ||
cfa0c56d | 2342 | static int register_cache(struct cache_sb *sb, struct cache_sb_disk *sb_disk, |
c9a78332 | 2343 | struct block_device *bdev, struct cache *ca) |
cafe5635 | 2344 | { |
d9dc1702 | 2345 | const char *err = NULL; /* must be set for any error case */ |
9b299728 | 2346 | int ret = 0; |
cafe5635 | 2347 | |
f59fce84 | 2348 | memcpy(&ca->sb, sb, sizeof(struct cache_sb)); |
cafe5635 KO |
2349 | ca->bdev = bdev; |
2350 | ca->bdev->bd_holder = ca; | |
475389ae | 2351 | ca->sb_disk = sb_disk; |
f59fce84 | 2352 | |
70200574 | 2353 | if (bdev_max_discard_sectors((bdev))) |
cafe5635 KO |
2354 | ca->discard = CACHE_DISCARD(&ca->sb); |
2355 | ||
c50d4d5d | 2356 | ret = cache_alloc(ca); |
d9dc1702 | 2357 | if (ret != 0) { |
bb6d355c CL |
2358 | /* |
 2359 | * If we fail here, ca->kobj is not initialized yet, so |
 2360 | * kobject_put() won't be called and bch_cache_release() will |
 2361 | * never get a chance to call blkdev_put() on bdev. So we |
 2362 | * call blkdev_put() explicitly here. |
2363 | */ | |
cc40daf9 | 2364 | blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL); |
d9dc1702 EW |
2365 | if (ret == -ENOMEM) |
2366 | err = "cache_alloc(): -ENOMEM"; | |
3a646fd7 DC |
2367 | else if (ret == -EPERM) |
2368 | err = "cache_alloc(): cache device is too small"; | |
d9dc1702 EW |
2369 | else |
2370 | err = "cache_alloc(): unknown error"; | |
f59fce84 | 2371 | goto err; |
d9dc1702 | 2372 | } |
f59fce84 | 2373 | |
8d65269f | 2374 | if (kobject_add(&ca->kobj, bdev_kobj(bdev), "bcache")) { |
9b299728 EW |
2375 | err = "error calling kobject_add"; |
2376 | ret = -ENOMEM; | |
2377 | goto out; | |
2378 | } | |
cafe5635 | 2379 | |
4fa03402 | 2380 | mutex_lock(&bch_register_lock); |
cafe5635 | 2381 | err = register_cache_set(ca); |
4fa03402 KO |
2382 | mutex_unlock(&bch_register_lock); |
2383 | ||
9b299728 EW |
2384 | if (err) { |
2385 | ret = -ENODEV; | |
2386 | goto out; | |
2387 | } | |
cafe5635 | 2388 | |
7e84c215 | 2389 | pr_info("registered cache device %pg\n", ca->bdev); |
9b299728 | 2390 | |
d83353b3 KO |
2391 | out: |
2392 | kobject_put(&ca->kobj); | |
9b299728 | 2393 | |
cafe5635 | 2394 | err: |
9b299728 | 2395 | if (err) |
7e84c215 | 2396 | pr_notice("error %pg: %s\n", ca->bdev, err); |
9b299728 EW |
2397 | |
2398 | return ret; | |
cafe5635 KO |
2399 | } |
2400 | ||
2401 | /* Global interfaces/init */ | |
2402 | ||
fc2d5988 CL |
2403 | static ssize_t register_bcache(struct kobject *k, struct kobj_attribute *attr, |
2404 | const char *buffer, size_t size); | |
0c277e21 CL |
2405 | static ssize_t bch_pending_bdevs_cleanup(struct kobject *k, |
2406 | struct kobj_attribute *attr, | |
2407 | const char *buffer, size_t size); | |
cafe5635 KO |
2408 | |
2409 | kobj_attribute_write(register, register_bcache); | |
2410 | kobj_attribute_write(register_quiet, register_bcache); | |
0c277e21 | 2411 | kobj_attribute_write(pendings_cleanup, bch_pending_bdevs_cleanup); |
cafe5635 | 2412 | |
4e7b5671 | 2413 | static bool bch_is_open_backing(dev_t dev) |
b3cf37bf | 2414 | { |
a9dd53ad GP |
2415 | struct cache_set *c, *tc; |
2416 | struct cached_dev *dc, *t; | |
2417 | ||
2418 | list_for_each_entry_safe(c, tc, &bch_cache_sets, list) | |
2419 | list_for_each_entry_safe(dc, t, &c->cached_devs, list) | |
4e7b5671 | 2420 | if (dc->bdev->bd_dev == dev) |
a9dd53ad GP |
2421 | return true; |
2422 | list_for_each_entry_safe(dc, t, &uncached_devices, list) | |
4e7b5671 | 2423 | if (dc->bdev->bd_dev == dev) |
a9dd53ad GP |
2424 | return true; |
2425 | return false; | |
2426 | } | |
2427 | ||
4e7b5671 | 2428 | static bool bch_is_open_cache(dev_t dev) |
b3cf37bf | 2429 | { |
a9dd53ad | 2430 | struct cache_set *c, *tc; |
a9dd53ad | 2431 | |
08fdb2cd CL |
2432 | list_for_each_entry_safe(c, tc, &bch_cache_sets, list) { |
2433 | struct cache *ca = c->cache; | |
2434 | ||
4e7b5671 | 2435 | if (ca->bdev->bd_dev == dev) |
08fdb2cd CL |
2436 | return true; |
2437 | } | |
2438 | ||
a9dd53ad GP |
2439 | return false; |
2440 | } | |
2441 | ||
4e7b5671 | 2442 | static bool bch_is_open(dev_t dev) |
b3cf37bf | 2443 | { |
4e7b5671 | 2444 | return bch_is_open_cache(dev) || bch_is_open_backing(dev); |
a9dd53ad GP |
2445 | } |
2446 | ||
9e23ccf8 | 2447 | struct async_reg_args { |
ee4a36f4 | 2448 | struct delayed_work reg_work; |
9e23ccf8 CL |
2449 | char *path; |
2450 | struct cache_sb *sb; | |
2451 | struct cache_sb_disk *sb_disk; | |
2452 | struct block_device *bdev; | |
2453 | }; | |
2454 | ||
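/*
 * Asynchronous registration (CONFIG_BCACHE_ASYNC_REGISTRATION):
 * register_bcache() packs the already-read superblock and the opened
 * bdev into struct async_reg_args and queues register_bdev_worker()
 * or register_cache_worker() below on system_wq, so the sysfs write
 * that triggered the registration can return without waiting.
 */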
2455 | static void register_bdev_worker(struct work_struct *work) | |
2456 | { | |
2457 | int fail = false; | |
2458 | struct async_reg_args *args = | |
ee4a36f4 | 2459 | container_of(work, struct async_reg_args, reg_work.work); |
9e23ccf8 CL |
2460 | struct cached_dev *dc; |
2461 | ||
2462 | dc = kzalloc(sizeof(*dc), GFP_KERNEL); | |
2463 | if (!dc) { | |
2464 | fail = true; | |
2465 | put_page(virt_to_page(args->sb_disk)); | |
2466 | blkdev_put(args->bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL); | |
2467 | goto out; | |
2468 | } | |
2469 | ||
2470 | mutex_lock(&bch_register_lock); | |
2471 | if (register_bdev(args->sb, args->sb_disk, args->bdev, dc) < 0) | |
2472 | fail = true; | |
2473 | mutex_unlock(&bch_register_lock); | |
2474 | ||
2475 | out: | |
2476 | if (fail) | |
 2477 | pr_info("error %s: failed to register backing device\n", |
2478 | args->path); | |
2479 | kfree(args->sb); | |
2480 | kfree(args->path); | |
2481 | kfree(args); | |
2482 | module_put(THIS_MODULE); | |
2483 | } | |
2484 | ||
2485 | static void register_cache_worker(struct work_struct *work) | |
2486 | { | |
2487 | int fail = false; | |
2488 | struct async_reg_args *args = | |
ee4a36f4 | 2489 | container_of(work, struct async_reg_args, reg_work.work); |
9e23ccf8 CL |
2490 | struct cache *ca; |
2491 | ||
2492 | ca = kzalloc(sizeof(*ca), GFP_KERNEL); | |
2493 | if (!ca) { | |
2494 | fail = true; | |
2495 | put_page(virt_to_page(args->sb_disk)); | |
2496 | blkdev_put(args->bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL); | |
2497 | goto out; | |
2498 | } | |
2499 | ||
2500 | /* blkdev_put() will be called in bch_cache_release() */ | |
2501 | if (register_cache(args->sb, args->sb_disk, args->bdev, ca) != 0) | |
2502 | fail = true; | |
2503 | ||
2504 | out: | |
2505 | if (fail) | |
 2506 | pr_info("error %s: failed to register cache device\n", |
2507 | args->path); | |
2508 | kfree(args->sb); | |
2509 | kfree(args->path); | |
2510 | kfree(args); | |
2511 | module_put(THIS_MODULE); | |
2512 | } | |
2513 | ||
d7fae7b4 | 2514 | static void register_device_async(struct async_reg_args *args) |
9e23ccf8 CL |
2515 | { |
2516 | if (SB_IS_BDEV(args->sb)) | |
ee4a36f4 | 2517 | INIT_DELAYED_WORK(&args->reg_work, register_bdev_worker); |
9e23ccf8 | 2518 | else |
ee4a36f4 | 2519 | INIT_DELAYED_WORK(&args->reg_work, register_cache_worker); |
9e23ccf8 | 2520 | |
ee4a36f4 CL |
 2521 | /* a delay of 10 jiffies is enough */ |
2522 | queue_delayed_work(system_wq, &args->reg_work, 10); | |
9e23ccf8 CL |
2523 | } |
2524 | ||
cafe5635 KO |
2525 | static ssize_t register_bcache(struct kobject *k, struct kobj_attribute *attr, |
2526 | const char *buffer, size_t size) | |
2527 | { | |
50246693 | 2528 | const char *err; |
29cda393 | 2529 | char *path = NULL; |
50246693 | 2530 | struct cache_sb *sb; |
cfa0c56d | 2531 | struct cache_sb_disk *sb_disk; |
fc8f19cc | 2532 | struct block_device *bdev; |
50246693 | 2533 | ssize_t ret; |
a58e88bf CL |
2534 | bool async_registration = false; |
2535 | ||
2536 | #ifdef CONFIG_BCACHE_ASYNC_REGISTRATION | |
2537 | async_registration = true; | |
2538 | #endif | |
cafe5635 | 2539 | |
50246693 | 2540 | ret = -EBUSY; |
29cda393 | 2541 | err = "failed to reference bcache module"; |
cafe5635 | 2542 | if (!try_module_get(THIS_MODULE)) |
50246693 | 2543 | goto out; |
cafe5635 | 2544 | |
a59ff6cc CL |
2545 | /* For latest state of bcache_is_reboot */ |
2546 | smp_mb(); | |
29cda393 | 2547 | err = "bcache is in reboot"; |
a59ff6cc | 2548 | if (bcache_is_reboot) |
50246693 | 2549 | goto out_module_put; |
a59ff6cc | 2550 | |
50246693 CH |
2551 | ret = -ENOMEM; |
2552 | err = "cannot allocate memory"; | |
a56489d4 FS |
2553 | path = kstrndup(buffer, size, GFP_KERNEL); |
2554 | if (!path) | |
50246693 | 2555 | goto out_module_put; |
a56489d4 FS |
2556 | |
2557 | sb = kmalloc(sizeof(struct cache_sb), GFP_KERNEL); | |
2558 | if (!sb) | |
50246693 | 2559 | goto out_free_path; |
cafe5635 | 2560 | |
50246693 | 2561 | ret = -EINVAL; |
cafe5635 KO |
2562 | err = "failed to open device"; |
2563 | bdev = blkdev_get_by_path(strim(path), | |
2564 | FMODE_READ|FMODE_WRITE|FMODE_EXCL, | |
2565 | sb); | |
f59fce84 | 2566 | if (IS_ERR(bdev)) { |
a9dd53ad | 2567 | if (bdev == ERR_PTR(-EBUSY)) { |
4e7b5671 CH |
2568 | dev_t dev; |
2569 | ||
789d21db | 2570 | mutex_lock(&bch_register_lock); |
4e7b5671 CH |
2571 | if (lookup_bdev(strim(path), &dev) == 0 && |
2572 | bch_is_open(dev)) | |
a9dd53ad GP |
2573 | err = "device already registered"; |
2574 | else | |
2575 | err = "device busy"; | |
789d21db | 2576 | mutex_unlock(&bch_register_lock); |
d7076f21 | 2577 | if (attr == &ksysfs_register_quiet) |
50246693 | 2578 | goto done; |
a9dd53ad | 2579 | } |
50246693 | 2580 | goto out_free_sb; |
f59fce84 KO |
2581 | } |
2582 | ||
2583 | err = "failed to set blocksize"; | |
2584 | if (set_blocksize(bdev, 4096)) | |
50246693 | 2585 | goto out_blkdev_put; |
cafe5635 | 2586 | |
cfa0c56d | 2587 | err = read_super(sb, bdev, &sb_disk); |
cafe5635 | 2588 | if (err) |
50246693 | 2589 | goto out_blkdev_put; |
cafe5635 | 2590 | |
cc40daf9 | 2591 | err = "failed to register device"; |
a58e88bf CL |
2592 | |
2593 | if (async_registration) { | |
9e23ccf8 CL |
2594 | /* register in asynchronous way */ |
2595 | struct async_reg_args *args = | |
2596 | kzalloc(sizeof(struct async_reg_args), GFP_KERNEL); | |
2597 | ||
2598 | if (!args) { | |
2599 | ret = -ENOMEM; | |
2600 | err = "cannot allocate memory"; | |
2601 | goto out_put_sb_page; | |
2602 | } | |
2603 | ||
2604 | args->path = path; | |
2605 | args->sb = sb; | |
2606 | args->sb_disk = sb_disk; | |
2607 | args->bdev = bdev; | |
d7fae7b4 | 2608 | register_device_async(args); |
9e23ccf8 CL |
 2609 | /* do not wait; return to user space immediately */ |
2610 | goto async_done; | |
2611 | } | |
2612 | ||
2903381f | 2613 | if (SB_IS_BDEV(sb)) { |
cafe5635 | 2614 | struct cached_dev *dc = kzalloc(sizeof(*dc), GFP_KERNEL); |
1fae7cf0 | 2615 | |
d55f7cb2 CY |
2616 | if (!dc) { |
2617 | ret = -ENOMEM; | |
2618 | err = "cannot allocate memory"; | |
50246693 | 2619 | goto out_put_sb_page; |
d55f7cb2 | 2620 | } |
cafe5635 | 2621 | |
4fa03402 | 2622 | mutex_lock(&bch_register_lock); |
cfa0c56d | 2623 | ret = register_bdev(sb, sb_disk, bdev, dc); |
4fa03402 | 2624 | mutex_unlock(&bch_register_lock); |
bb6d355c | 2625 | /* blkdev_put() will be called in cached_dev_free() */ |
fc8f19cc CH |
2626 | if (ret < 0) |
2627 | goto out_free_sb; | |
cafe5635 KO |
2628 | } else { |
2629 | struct cache *ca = kzalloc(sizeof(*ca), GFP_KERNEL); | |
1fae7cf0 | 2630 | |
d55f7cb2 CY |
2631 | if (!ca) { |
2632 | ret = -ENOMEM; | |
2633 | err = "cannot allocate memory"; | |
50246693 | 2634 | goto out_put_sb_page; |
d55f7cb2 | 2635 | } |
cafe5635 | 2636 | |
bb6d355c | 2637 | /* blkdev_put() will be called in bch_cache_release() */ |
d55f7cb2 CY |
2638 | ret = register_cache(sb, sb_disk, bdev, ca); |
2639 | if (ret) | |
fc8f19cc | 2640 | goto out_free_sb; |
cafe5635 | 2641 | } |
50246693 | 2642 | |
50246693 | 2643 | done: |
cafe5635 KO |
2644 | kfree(sb); |
2645 | kfree(path); | |
cafe5635 | 2646 | module_put(THIS_MODULE); |
9e23ccf8 | 2647 | async_done: |
50246693 CH |
2648 | return size; |
2649 | ||
2650 | out_put_sb_page: | |
cfa0c56d | 2651 | put_page(virt_to_page(sb_disk)); |
50246693 | 2652 | out_blkdev_put: |
fc8f19cc | 2653 | blkdev_put(bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL); |
50246693 CH |
2654 | out_free_sb: |
2655 | kfree(sb); | |
2656 | out_free_path: | |
2657 | kfree(path); | |
ae3cd299 | 2658 | path = NULL; |
50246693 CH |
2659 | out_module_put: |
2660 | module_put(THIS_MODULE); | |
2661 | out: | |
46f5aa88 | 2662 | pr_info("error %s: %s\n", path?path:"", err); |
50246693 | 2663 | return ret; |
cafe5635 KO |
2664 | } |
2665 | ||
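/*
 * A registration sketch from user space, assuming the register and
 * register_quiet files created under /sys/fs/bcache by bcache_init()
 * below:
 *
 *   echo /dev/sdb  > /sys/fs/bcache/register    # cache device
 *   echo /dev/sdc1 > /sys/fs/bcache/register    # backing device
 *
 * Whether the device is treated as a cache or a backing device is
 * decided by SB_IS_BDEV() on the superblock just read, not by which
 * file is written.
 */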
0c277e21 CL |
2666 | |
2667 | struct pdev { | |
2668 | struct list_head list; | |
2669 | struct cached_dev *dc; | |
2670 | }; | |
2671 | ||
2672 | static ssize_t bch_pending_bdevs_cleanup(struct kobject *k, | |
2673 | struct kobj_attribute *attr, | |
2674 | const char *buffer, | |
2675 | size_t size) | |
2676 | { | |
2677 | LIST_HEAD(pending_devs); | |
2678 | ssize_t ret = size; | |
2679 | struct cached_dev *dc, *tdc; | |
2680 | struct pdev *pdev, *tpdev; | |
2681 | struct cache_set *c, *tc; | |
2682 | ||
2683 | mutex_lock(&bch_register_lock); | |
2684 | list_for_each_entry_safe(dc, tdc, &uncached_devices, list) { | |
2685 | pdev = kmalloc(sizeof(struct pdev), GFP_KERNEL); | |
2686 | if (!pdev) | |
2687 | break; | |
2688 | pdev->dc = dc; | |
2689 | list_add(&pdev->list, &pending_devs); | |
2690 | } | |
2691 | ||
2692 | list_for_each_entry_safe(pdev, tpdev, &pending_devs, list) { | |
e8092707 | 2693 | char *pdev_set_uuid = pdev->dc->sb.set_uuid; |
0c277e21 | 2694 | list_for_each_entry_safe(c, tc, &bch_cache_sets, list) { |
1132e56e | 2695 | char *set_uuid = c->set_uuid; |
0c277e21 CL |
2696 | |
2697 | if (!memcmp(pdev_set_uuid, set_uuid, 16)) { | |
2698 | list_del(&pdev->list); | |
2699 | kfree(pdev); | |
2700 | break; | |
2701 | } | |
2702 | } | |
2703 | } | |
2704 | mutex_unlock(&bch_register_lock); | |
2705 | ||
2706 | list_for_each_entry_safe(pdev, tpdev, &pending_devs, list) { | |
46f5aa88 | 2707 | pr_info("delete pdev %p\n", pdev); |
0c277e21 CL |
2708 | list_del(&pdev->list); |
2709 | bcache_device_stop(&pdev->dc->disk); | |
2710 | kfree(pdev); | |
2711 | } | |
2712 | ||
2713 | return ret; | |
2714 | } | |
2715 | ||
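/*
 * A cleanup sketch, assuming the pendings_cleanup attribute declared
 * above and exported under /sys/fs/bcache; the written value is
 * ignored, any write triggers the scan:
 *
 *   echo 1 > /sys/fs/bcache/pendings_cleanup
 *
 * Backing devices registered but still waiting for their cache set
 * are stopped.
 */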
cafe5635 KO |
2716 | static int bcache_reboot(struct notifier_block *n, unsigned long code, void *x) |
2717 | { | |
a59ff6cc CL |
2718 | if (bcache_is_reboot) |
2719 | return NOTIFY_DONE; | |
2720 | ||
cafe5635 KO |
2721 | if (code == SYS_DOWN || |
2722 | code == SYS_HALT || | |
2723 | code == SYS_POWER_OFF) { | |
2724 | DEFINE_WAIT(wait); | |
2725 | unsigned long start = jiffies; | |
2726 | bool stopped = false; | |
2727 | ||
2728 | struct cache_set *c, *tc; | |
2729 | struct cached_dev *dc, *tdc; | |
2730 | ||
2731 | mutex_lock(&bch_register_lock); | |
2732 | ||
a59ff6cc CL |
2733 | if (bcache_is_reboot) |
2734 | goto out; | |
2735 | ||
 2736 | /* New registrations are rejected from now on */ |
2737 | bcache_is_reboot = true; | |
2738 | /* | |
 2739 | * Make a registering caller (if any) on another CPU |
 2740 | * core see bcache_is_reboot set to true first |
2741 | */ | |
2742 | smp_mb(); | |
2743 | ||
cafe5635 KO |
2744 | if (list_empty(&bch_cache_sets) && |
2745 | list_empty(&uncached_devices)) | |
2746 | goto out; | |
2747 | ||
a59ff6cc CL |
2748 | mutex_unlock(&bch_register_lock); |
2749 | ||
46f5aa88 | 2750 | pr_info("Stopping all devices:\n"); |
cafe5635 | 2751 | |
a59ff6cc CL |
2752 | /* |
2753 | * The reason bch_register_lock is not held to call | |
2754 | * bch_cache_set_stop() and bcache_device_stop() is to | |
2755 | * avoid potential deadlock during reboot, because cache | |
a307e2ab | 2756 | * set or bcache device stopping process will acquire |
a59ff6cc CL |
2757 | * bch_register_lock too. |
2758 | * | |
 2759 | * We are safe here because bcache_is_reboot is already set |
 2760 | * to true, so register_bcache() will reject any new |
 2761 | * registration. bcache_is_reboot also ensures bcache_reboot() |
 2762 | * won't be re-entered by another thread, so there is no race |
 2763 | * in the following list iteration with |
 2764 | * list_for_each_entry_safe(). |
2765 | */ | |
cafe5635 KO |
2766 | list_for_each_entry_safe(c, tc, &bch_cache_sets, list) |
2767 | bch_cache_set_stop(c); | |
2768 | ||
2769 | list_for_each_entry_safe(dc, tdc, &uncached_devices, list) | |
2770 | bcache_device_stop(&dc->disk); | |
2771 | ||
eb8cbb6d CL |
2772 | |
2773 | /* | |
 2774 | * Give other kthreads and kworkers an early |
 2775 | * chance to stop themselves |
2776 | */ | |
2777 | schedule(); | |
2778 | ||
cafe5635 KO |
2779 | /* What's a condition variable? */ |
2780 | while (1) { | |
eb8cbb6d | 2781 | long timeout = start + 10 * HZ - jiffies; |
cafe5635 | 2782 | |
eb8cbb6d | 2783 | mutex_lock(&bch_register_lock); |
cafe5635 KO |
2784 | stopped = list_empty(&bch_cache_sets) && |
2785 | list_empty(&uncached_devices); | |
2786 | ||
2787 | if (timeout < 0 || stopped) | |
2788 | break; | |
2789 | ||
2790 | prepare_to_wait(&unregister_wait, &wait, | |
2791 | TASK_UNINTERRUPTIBLE); | |
2792 | ||
2793 | mutex_unlock(&bch_register_lock); | |
2794 | schedule_timeout(timeout); | |
cafe5635 KO |
2795 | } |
2796 | ||
2797 | finish_wait(&unregister_wait, &wait); | |
2798 | ||
2799 | if (stopped) | |
46f5aa88 | 2800 | pr_info("All devices stopped\n"); |
cafe5635 | 2801 | else |
46f5aa88 | 2802 | pr_notice("Timeout waiting for devices to be closed\n"); |
cafe5635 KO |
2803 | out: |
2804 | mutex_unlock(&bch_register_lock); | |
2805 | } | |
2806 | ||
2807 | return NOTIFY_DONE; | |
2808 | } | |
2809 | ||
2810 | static struct notifier_block reboot = { | |
2811 | .notifier_call = bcache_reboot, | |
2812 | .priority = INT_MAX, /* before any real devices */ | |
2813 | }; | |
2814 | ||
2815 | static void bcache_exit(void) | |
2816 | { | |
2817 | bch_debug_exit(); | |
cafe5635 | 2818 | bch_request_exit(); |
cafe5635 KO |
2819 | if (bcache_kobj) |
2820 | kobject_put(bcache_kobj); | |
2821 | if (bcache_wq) | |
2822 | destroy_workqueue(bcache_wq); | |
0f843e65 GF |
2823 | if (bch_journal_wq) |
2824 | destroy_workqueue(bch_journal_wq); | |
afe78ab4 KK |
2825 | if (bch_flush_wq) |
2826 | destroy_workqueue(bch_flush_wq); | |
9f233ffe | 2827 | bch_btree_exit(); |
0f843e65 | 2828 | |
5c41c8a7 KO |
2829 | if (bcache_major) |
2830 | unregister_blkdev(bcache_major, "bcache"); | |
cafe5635 | 2831 | unregister_reboot_notifier(&reboot); |
330a4db8 | 2832 | mutex_destroy(&bch_register_lock); |
cafe5635 KO |
2833 | } |
2834 | ||
9aaf5165 CL |
2835 | /* Check and fixup module parameters */ |
2836 | static void check_module_parameters(void) | |
2837 | { | |
2838 | if (bch_cutoff_writeback_sync == 0) | |
2839 | bch_cutoff_writeback_sync = CUTOFF_WRITEBACK_SYNC; | |
2840 | else if (bch_cutoff_writeback_sync > CUTOFF_WRITEBACK_SYNC_MAX) { | |
46f5aa88 | 2841 | pr_warn("set bch_cutoff_writeback_sync (%u) to max value %u\n", |
9aaf5165 CL |
2842 | bch_cutoff_writeback_sync, CUTOFF_WRITEBACK_SYNC_MAX); |
2843 | bch_cutoff_writeback_sync = CUTOFF_WRITEBACK_SYNC_MAX; | |
2844 | } | |
2845 | ||
2846 | if (bch_cutoff_writeback == 0) | |
2847 | bch_cutoff_writeback = CUTOFF_WRITEBACK; | |
2848 | else if (bch_cutoff_writeback > CUTOFF_WRITEBACK_MAX) { | |
46f5aa88 | 2849 | pr_warn("set bch_cutoff_writeback (%u) to max value %u\n", |
9aaf5165 CL |
2850 | bch_cutoff_writeback, CUTOFF_WRITEBACK_MAX); |
2851 | bch_cutoff_writeback = CUTOFF_WRITEBACK_MAX; | |
2852 | } | |
2853 | ||
2854 | if (bch_cutoff_writeback > bch_cutoff_writeback_sync) { | |
46f5aa88 | 2855 | pr_warn("set bch_cutoff_writeback (%u) to %u\n", |
9aaf5165 CL |
2856 | bch_cutoff_writeback, bch_cutoff_writeback_sync); |
2857 | bch_cutoff_writeback = bch_cutoff_writeback_sync; | |
2858 | } | |
2859 | } | |
2860 | ||
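/*
 * A load-time sketch for the two module parameters declared at the
 * bottom of this file (perm 0, so they can only be set when the module
 * is loaded; the values shown are illustrative):
 *
 *   modprobe bcache bch_cutoff_writeback=40 bch_cutoff_writeback_sync=70
 *
 * check_module_parameters() above clamps out-of-range values and keeps
 * bch_cutoff_writeback <= bch_cutoff_writeback_sync.
 */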
cafe5635 KO |
2861 | static int __init bcache_init(void) |
2862 | { | |
2863 | static const struct attribute *files[] = { | |
2864 | &ksysfs_register.attr, | |
2865 | &ksysfs_register_quiet.attr, | |
0c277e21 | 2866 | &ksysfs_pendings_cleanup.attr, |
cafe5635 KO |
2867 | NULL |
2868 | }; | |
2869 | ||
9aaf5165 CL |
2870 | check_module_parameters(); |
2871 | ||
cafe5635 KO |
2872 | mutex_init(&bch_register_lock); |
2873 | init_waitqueue_head(&unregister_wait); | |
2874 | register_reboot_notifier(&reboot); | |
2875 | ||
2876 | bcache_major = register_blkdev(0, "bcache"); | |
2ecf0cdb ZL |
2877 | if (bcache_major < 0) { |
2878 | unregister_reboot_notifier(&reboot); | |
330a4db8 | 2879 | mutex_destroy(&bch_register_lock); |
cafe5635 | 2880 | return bcache_major; |
2ecf0cdb | 2881 | } |
cafe5635 | 2882 | |
9f233ffe KK |
2883 | if (bch_btree_init()) |
2884 | goto err; | |
2885 | ||
16c1fdf4 FS |
2886 | bcache_wq = alloc_workqueue("bcache", WQ_MEM_RECLAIM, 0); |
2887 | if (!bcache_wq) | |
2888 | goto err; | |
2889 | ||
afe78ab4 KK |
2890 | /* |
2891 | * Let's not make this `WQ_MEM_RECLAIM` for the following reasons: | |
2892 | * | |
 2893 | * 1. It used `system_wq` before, which also does no memory reclaim. |
2894 | * 2. With `WQ_MEM_RECLAIM` desktop stalls, increased boot times, and | |
2895 | * reduced throughput can be observed. | |
2896 | * | |
 2897 | * We still want to use our own queue so as not to congest the `system_wq`. |
2898 | */ | |
2899 | bch_flush_wq = alloc_workqueue("bch_flush", 0, 0); | |
2900 | if (!bch_flush_wq) | |
2901 | goto err; | |
2902 | ||
0f843e65 GF |
2903 | bch_journal_wq = alloc_workqueue("bch_journal", WQ_MEM_RECLAIM, 0); |
2904 | if (!bch_journal_wq) | |
2905 | goto err; | |
2906 | ||
16c1fdf4 FS |
2907 | bcache_kobj = kobject_create_and_add("bcache", fs_kobj); |
2908 | if (!bcache_kobj) | |
2909 | goto err; | |
2910 | ||
2911 | if (bch_request_init() || | |
330a4db8 | 2912 | sysfs_create_files(bcache_kobj, files)) |
cafe5635 KO |
2913 | goto err; |
2914 | ||
91bafdf0 | 2915 | bch_debug_init(); |
78ac2107 CL |
2916 | closure_debug_init(); |
2917 | ||
a59ff6cc CL |
2918 | bcache_is_reboot = false; |
2919 | ||
cafe5635 KO |
2920 | return 0; |
2921 | err: | |
2922 | bcache_exit(); | |
2923 | return -ENOMEM; | |
2924 | } | |
2925 | ||
9aaf5165 CL |
2926 | /* |
2927 | * Module hooks | |
2928 | */ | |
cafe5635 KO |
2929 | module_exit(bcache_exit); |
2930 | module_init(bcache_init); | |
009673d0 | 2931 | |
9aaf5165 CL |
2932 | module_param(bch_cutoff_writeback, uint, 0); |
2933 | MODULE_PARM_DESC(bch_cutoff_writeback, "threshold to cutoff writeback"); | |
2934 | ||
2935 | module_param(bch_cutoff_writeback_sync, uint, 0); | |
2936 | MODULE_PARM_DESC(bch_cutoff_writeback_sync, "hard threshold to cutoff writeback"); | |
2937 | ||
009673d0 CL |
2938 | MODULE_DESCRIPTION("Bcache: a Linux block layer cache"); |
2939 | MODULE_AUTHOR("Kent Overstreet <kent.overstreet@gmail.com>"); | |
2940 | MODULE_LICENSE("GPL"); |