// SPDX-License-Identifier: GPL-2.0
/*
 * Main bcache entry point - handle a read or a write request and decide what to
 * do with it; the make_request functions are called by the block layer.
 *
 * Copyright 2010, 2011 Kent Overstreet <kent.overstreet@gmail.com>
 * Copyright 2012 Google, Inc.
 */

#include "bcache.h"
#include "btree.h"
#include "debug.h"
#include "request.h"
#include "writeback.h"

#include <linux/module.h>
#include <linux/hash.h>
#include <linux/random.h>
#include <linux/backing-dev.h>

#include <trace/events/bcache.h>

#define CUTOFF_CACHE_ADD	95
#define CUTOFF_CACHE_READA	90

struct kmem_cache *bch_search_cache;

static void bch_data_insert_start(struct closure *);

static unsigned cache_mode(struct cached_dev *dc)
{
	return BDEV_CACHE_MODE(&dc->sb);
}

static bool verify(struct cached_dev *dc)
{
	return dc->verify;
}

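/*
 * Checksum the bio's data with a 64-bit CRC and stash the result in the
 * uint64_t slot immediately after the key's last bucket pointer; only the
 * low 63 bits are kept (the top bit of the slot is masked off).
 */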
static void bio_csum(struct bio *bio, struct bkey *k)
{
	struct bio_vec bv;
	struct bvec_iter iter;
	uint64_t csum = 0;

	bio_for_each_segment(bv, bio, iter) {
		void *d = kmap(bv.bv_page) + bv.bv_offset;
		csum = bch_crc64_update(csum, d, bv.bv_len);
		kunmap(bv.bv_page);
	}

	k->ptr[KEY_PTRS(k)] = csum & (~0ULL >> 1);
}

/* Insert data into cache */

static void bch_data_insert_keys(struct closure *cl)
{
	struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);
	atomic_t *journal_ref = NULL;
	struct bkey *replace_key = op->replace ? &op->replace_key : NULL;
	int ret;

	/*
	 * If we're looping, we might already be waiting on another journal
	 * write - we can't wait on more than one journal write at a time.
	 *
	 * XXX: this looks wrong
	 */
#if 0
	while (atomic_read(&s->cl.remaining) & CLOSURE_WAITING)
		closure_sync(&s->cl);
#endif

	if (!op->replace)
		journal_ref = bch_journal(op->c, &op->insert_keys,
					  op->flush_journal ? cl : NULL);

	ret = bch_btree_insert(op->c, &op->insert_keys,
			       journal_ref, replace_key);
	if (ret == -ESRCH) {
		op->replace_collision = true;
	} else if (ret) {
		op->status		= BLK_STS_RESOURCE;
		op->insert_data_done	= true;
	}

	if (journal_ref)
		atomic_dec_bug(journal_ref);

	if (!op->insert_data_done) {
		continue_at(cl, bch_data_insert_start, op->wq);
		return;
	}

	bch_keylist_free(&op->insert_keys);
	closure_return(cl);
}

static int bch_keylist_realloc(struct keylist *l, unsigned u64s,
			       struct cache_set *c)
{
	size_t oldsize = bch_keylist_nkeys(l);
	size_t newsize = oldsize + u64s;

	/*
	 * The journalling code doesn't handle the case where the keys to insert
	 * are bigger than an empty write: If we just return -ENOMEM here,
	 * bio_insert() and bio_invalidate() will insert the keys created so far
	 * and finish the rest when the keylist is empty.
	 */
	if (newsize * sizeof(uint64_t) > block_bytes(c) - sizeof(struct jset))
		return -ENOMEM;

	return __bch_keylist_realloc(l, u64s);
}

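/*
 * Invalidate the region covered by op->bio by inserting keys with no
 * pointers; these overwrite whatever is cached for those sectors. If the
 * keylist fills up we insert what we have so far and finish the rest on
 * the next pass through bch_data_insert_start().
 */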
static void bch_data_invalidate(struct closure *cl)
{
	struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);
	struct bio *bio = op->bio;

	pr_debug("invalidating %i sectors from %llu",
		 bio_sectors(bio), (uint64_t) bio->bi_iter.bi_sector);

	while (bio_sectors(bio)) {
		unsigned sectors = min(bio_sectors(bio),
				       1U << (KEY_SIZE_BITS - 1));

		if (bch_keylist_realloc(&op->insert_keys, 2, op->c))
			goto out;

		bio->bi_iter.bi_sector	+= sectors;
		bio->bi_iter.bi_size	-= sectors << 9;

		bch_keylist_add(&op->insert_keys,
				&KEY(op->inode, bio->bi_iter.bi_sector, sectors));
	}

	op->insert_data_done = true;
	bio_put(bio);
out:
	continue_at(cl, bch_data_insert_keys, op->wq);
}

static void bch_data_insert_error(struct closure *cl)
{
	struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);

	/*
	 * Our data write just errored, which means we've got a bunch of keys to
	 * insert that point to data that wasn't successfully written.
	 *
	 * We don't have to insert those keys but we still have to invalidate
	 * that region of the cache - so, if we just strip off all the pointers
	 * from the keys we'll accomplish just that.
	 */

	struct bkey *src = op->insert_keys.keys, *dst = op->insert_keys.keys;

	while (src != op->insert_keys.top) {
		struct bkey *n = bkey_next(src);

		SET_KEY_PTRS(src, 0);
		memmove(dst, src, bkey_bytes(src));

		dst = bkey_next(dst);
		src = n;
	}

	op->insert_keys.top = dst;

	bch_data_insert_keys(cl);
}

static void bch_data_insert_endio(struct bio *bio)
{
	struct closure *cl = bio->bi_private;
	struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);

	if (bio->bi_status) {
		/* TODO: We could try to recover from this. */
		if (op->writeback)
			op->status = bio->bi_status;
		else if (!op->replace)
			set_closure_fn(cl, bch_data_insert_error, op->wq);
		else
			set_closure_fn(cl, NULL, NULL);
	}

	bch_bbio_endio(op->c, bio, bio->bi_status, "writing data to cache");
}

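/*
 * Write op->bio to the cache device: bch_alloc_sectors() hands back space
 * in one or more buckets, the bio is split to fit, and one key describing
 * where each fragment landed is queued up for the btree insert.
 */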
static void bch_data_insert_start(struct closure *cl)
{
	struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);
	struct bio *bio = op->bio, *n;

	if (op->bypass)
		return bch_data_invalidate(cl);

	if (atomic_sub_return(bio_sectors(bio), &op->c->sectors_to_gc) < 0)
		wake_up_gc(op->c);

	/*
	 * Journal writes are marked REQ_PREFLUSH; if the original write was a
	 * flush, it'll wait on the journal write.
	 */
	bio->bi_opf &= ~(REQ_PREFLUSH|REQ_FUA);

	do {
		unsigned i;
		struct bkey *k;
		struct bio_set *split = op->c->bio_split;

		/* 1 for the device pointer and 1 for the checksum */
		if (bch_keylist_realloc(&op->insert_keys,
					3 + (op->csum ? 1 : 0),
					op->c)) {
			continue_at(cl, bch_data_insert_keys, op->wq);
			return;
		}

		k = op->insert_keys.top;
		bkey_init(k);
		SET_KEY_INODE(k, op->inode);
		SET_KEY_OFFSET(k, bio->bi_iter.bi_sector);

		if (!bch_alloc_sectors(op->c, k, bio_sectors(bio),
				       op->write_point, op->write_prio,
				       op->writeback))
			goto err;

		n = bio_next_split(bio, KEY_SIZE(k), GFP_NOIO, split);

		n->bi_end_io	= bch_data_insert_endio;
		n->bi_private	= cl;

		if (op->writeback) {
			SET_KEY_DIRTY(k, true);

			for (i = 0; i < KEY_PTRS(k); i++)
				SET_GC_MARK(PTR_BUCKET(op->c, k, i),
					    GC_MARK_DIRTY);
		}

		SET_KEY_CSUM(k, op->csum);
		if (KEY_CSUM(k))
			bio_csum(n, k);

		trace_bcache_cache_insert(k);
		bch_keylist_push(&op->insert_keys);

		bio_set_op_attrs(n, REQ_OP_WRITE, 0);
		bch_submit_bbio(n, op->c, k, 0);
	} while (n != bio);

	op->insert_data_done = true;
	continue_at(cl, bch_data_insert_keys, op->wq);
	return;
err:
	/* bch_alloc_sectors() blocks if op->writeback = true */
	BUG_ON(op->writeback);

	/*
	 * But if it's not a writeback write we'd rather just bail out if
	 * there aren't any buckets ready to write to - it might take a while
	 * and we might be starving btree writes for gc or something.
	 */

	if (!op->replace) {
		/*
		 * Writethrough write: We can't complete the write until we've
		 * updated the index. But we don't want to delay the write while
		 * we wait for buckets to be freed up, so just invalidate the
		 * rest of the write.
		 */
		op->bypass = true;
		return bch_data_invalidate(cl);
	} else {
		/*
		 * From a cache miss, we can just insert the keys for the data
		 * we have written or bail out if we didn't do anything.
		 */
		op->insert_data_done = true;
		bio_put(bio);

		if (!bch_keylist_empty(&op->insert_keys))
			continue_at(cl, bch_data_insert_keys, op->wq);
		else
			closure_return(cl);
	}
}

/**
 * bch_data_insert - stick some data in the cache
 *
 * This is the starting point for any data to end up in a cache device; it could
 * be from a normal write, or a writeback write, or a write to a flash only
 * volume - it's also used by the moving garbage collector to compact data in
 * mostly empty buckets.
 *
 * It first writes the data to the cache, creating a list of keys to be inserted
 * (if the data had to be fragmented there will be multiple keys); after the
 * data is written it calls bch_journal, and after the keys have been added to
 * the next journal write they're inserted into the btree.
 *
 * It inserts the data in op->bio; bi_sector is used for the key offset,
 * and op->inode is used for the key inode.
 *
 * If op->bypass is true, instead of inserting the data it invalidates the
 * region of the cache represented by op->bio and op->inode.
 */
void bch_data_insert(struct closure *cl)
{
	struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);

	trace_bcache_write(op->c, op->inode, op->bio,
			   op->writeback, op->bypass);

	bch_keylist_init(&op->insert_keys);
	bio_get(op->bio);
	bch_data_insert_start(cl);
}

/* Congested? */

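/*
 * Returns 0 when the cache isn't congested; otherwise returns a
 * (randomized, roughly exponentially scaled) sector count that
 * check_should_bypass() compares against a task's recent sequential IO
 * to decide whether to bypass the cache.
 */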
unsigned bch_get_congested(struct cache_set *c)
{
	int i;
	long rand;

	if (!c->congested_read_threshold_us &&
	    !c->congested_write_threshold_us)
		return 0;

	i = (local_clock_us() - c->congested_last_us) / 1024;
	if (i < 0)
		return 0;

	i += atomic_read(&c->congested);
	if (i >= 0)
		return 0;

	i += CONGESTED_MAX;

	if (i > 0)
		i = fract_exp_two(i, 6);

	rand = get_random_int();
	i -= bitmap_weight(&rand, BITS_PER_LONG);

	return i > 0 ? i : 1;
}

static void add_sequential(struct task_struct *t)
{
	ewma_add(t->sequential_io_avg,
		 t->sequential_io, 8, 0);

	t->sequential_io = 0;
}

static struct hlist_head *iohash(struct cached_dev *dc, uint64_t k)
{
	return &dc->io_hash[hash_64(k, RECENT_IO_BITS)];
}

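/*
 * Decide whether this IO should go straight to the backing device: bypass
 * while detaching, when the cache is more than CUTOFF_CACHE_ADD percent
 * full, for discards, for writes in writearound/none mode, for unaligned
 * IO, and for IO that looks sequential or arrives while congested.
 */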
static bool check_should_bypass(struct cached_dev *dc, struct bio *bio)
{
	struct cache_set *c = dc->disk.c;
	unsigned mode = cache_mode(dc);
	unsigned sectors, congested = bch_get_congested(c);
	struct task_struct *task = current;
	struct io *i;

	if (test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags) ||
	    c->gc_stats.in_use > CUTOFF_CACHE_ADD ||
	    (bio_op(bio) == REQ_OP_DISCARD))
		goto skip;

	if (mode == CACHE_MODE_NONE ||
	    (mode == CACHE_MODE_WRITEAROUND &&
	     op_is_write(bio_op(bio))))
		goto skip;

	/*
	 * Flag for bypass if the IO is for read-ahead or background,
	 * unless the read-ahead request is for metadata (eg, for gfs2).
	 */
	if (bio->bi_opf & (REQ_RAHEAD|REQ_BACKGROUND) &&
	    !(bio->bi_opf & REQ_META))
		goto skip;

	if (bio->bi_iter.bi_sector & (c->sb.block_size - 1) ||
	    bio_sectors(bio) & (c->sb.block_size - 1)) {
		pr_debug("skipping unaligned io");
		goto skip;
	}

	if (bypass_torture_test(dc)) {
		if ((get_random_int() & 3) == 3)
			goto skip;
		else
			goto rescale;
	}

	if (!congested && !dc->sequential_cutoff)
		goto rescale;

	spin_lock(&dc->io_lock);

	hlist_for_each_entry(i, iohash(dc, bio->bi_iter.bi_sector), hash)
		if (i->last == bio->bi_iter.bi_sector &&
		    time_before(jiffies, i->jiffies))
			goto found;

	i = list_first_entry(&dc->io_lru, struct io, lru);

	add_sequential(task);
	i->sequential = 0;
found:
	if (i->sequential + bio->bi_iter.bi_size > i->sequential)
		i->sequential	+= bio->bi_iter.bi_size;

	i->last			 = bio_end_sector(bio);
	i->jiffies		 = jiffies + msecs_to_jiffies(5000);
	task->sequential_io	 = i->sequential;

	hlist_del(&i->hash);
	hlist_add_head(&i->hash, iohash(dc, i->last));
	list_move_tail(&i->lru, &dc->io_lru);

	spin_unlock(&dc->io_lock);

	sectors = max(task->sequential_io,
		      task->sequential_io_avg) >> 9;

	if (dc->sequential_cutoff &&
	    sectors >= dc->sequential_cutoff >> 9) {
		trace_bcache_bypass_sequential(bio);
		goto skip;
	}

	if (congested && sectors >= congested) {
		trace_bcache_bypass_congested(bio);
		goto skip;
	}

rescale:
	bch_rescale_priorities(c, bio_sectors(bio));
	return false;
skip:
	bch_mark_sectors_bypassed(c, dc, bio_sectors(bio));
	return true;
}

/* Cache lookup */

struct search {
	/* Stack frame for bio_complete */
	struct closure		cl;

	struct bbio		bio;
	struct bio		*orig_bio;
	struct bio		*cache_miss;
	struct bcache_device	*d;

	unsigned		insert_bio_sectors;
	unsigned		recoverable:1;
	unsigned		write:1;
	unsigned		read_dirty_data:1;
	unsigned		cache_missed:1;

	unsigned long		start_time;

	struct btree_op		op;
	struct data_insert_op	iop;
};

static void bch_cache_read_endio(struct bio *bio)
{
	struct bbio *b = container_of(bio, struct bbio, bio);
	struct closure *cl = bio->bi_private;
	struct search *s = container_of(cl, struct search, cl);

	/*
	 * If the bucket was reused while our bio was in flight, we might have
	 * read the wrong data. Set s->iop.status but not bio->bi_status, so it
	 * doesn't get counted against the cache device, but we'll still reread
	 * the data from the backing device.
	 */

	if (bio->bi_status)
		s->iop.status = bio->bi_status;
	else if (!KEY_DIRTY(&b->key) &&
		 ptr_stale(s->iop.c, &b->key, 0)) {
		atomic_long_inc(&s->iop.c->cache_read_races);
		s->iop.status = BLK_STS_IOERR;
	}

	bch_bbio_endio(s->iop.c, bio, bio->bi_status, "reading from cache");
}

/*
 * Read from a single key, handling the initial cache miss if the key starts in
 * the middle of the bio
 */
static int cache_lookup_fn(struct btree_op *op, struct btree *b, struct bkey *k)
{
	struct search *s = container_of(op, struct search, op);
	struct bio *n, *bio = &s->bio.bio;
	struct bkey *bio_key;
	unsigned ptr;

	if (bkey_cmp(k, &KEY(s->iop.inode, bio->bi_iter.bi_sector, 0)) <= 0)
		return MAP_CONTINUE;

	if (KEY_INODE(k) != s->iop.inode ||
	    KEY_START(k) > bio->bi_iter.bi_sector) {
		unsigned bio_sectors = bio_sectors(bio);
		unsigned sectors = KEY_INODE(k) == s->iop.inode
			? min_t(uint64_t, INT_MAX,
				KEY_START(k) - bio->bi_iter.bi_sector)
			: INT_MAX;

		int ret = s->d->cache_miss(b, s, bio, sectors);
		if (ret != MAP_CONTINUE)
			return ret;

		/* if this was a complete miss we shouldn't get here */
		BUG_ON(bio_sectors <= sectors);
	}

	if (!KEY_SIZE(k))
		return MAP_CONTINUE;

	/* XXX: figure out best pointer - for multiple cache devices */
	ptr = 0;

	PTR_BUCKET(b->c, k, ptr)->prio = INITIAL_PRIO;

	if (KEY_DIRTY(k))
		s->read_dirty_data = true;

	n = bio_next_split(bio, min_t(uint64_t, INT_MAX,
				      KEY_OFFSET(k) - bio->bi_iter.bi_sector),
			   GFP_NOIO, s->d->bio_split);

	bio_key = &container_of(n, struct bbio, bio)->key;
	bch_bkey_copy_single_ptr(bio_key, k, ptr);

	bch_cut_front(&KEY(s->iop.inode, n->bi_iter.bi_sector, 0), bio_key);
	bch_cut_back(&KEY(s->iop.inode, bio_end_sector(n), 0), bio_key);

	n->bi_end_io	= bch_cache_read_endio;
	n->bi_private	= &s->cl;

	/*
	 * The bucket we're reading from might be reused while our bio
	 * is in flight, and we could then end up reading the wrong
	 * data.
	 *
	 * We guard against this by checking (in bch_cache_read_endio()) if
	 * the pointer is stale again; if so, we treat it as an error
	 * and reread from the backing device (but we don't pass that
	 * error up anywhere).
	 */

	__bch_submit_bbio(n, b->c);
	return n == bio ? MAP_DONE : MAP_CONTINUE;
}

static void cache_lookup(struct closure *cl)
{
	struct search *s = container_of(cl, struct search, iop.cl);
	struct bio *bio = &s->bio.bio;
	struct cached_dev *dc;
	int ret;

	bch_btree_op_init(&s->op, -1);

	ret = bch_btree_map_keys(&s->op, s->iop.c,
				 &KEY(s->iop.inode, bio->bi_iter.bi_sector, 0),
				 cache_lookup_fn, MAP_END_KEY);
	if (ret == -EAGAIN) {
		continue_at(cl, cache_lookup, bcache_wq);
		return;
	}

	/*
	 * We might run into an error while searching the btree; if that
	 * happens, ret is negative. In that case we should not recover data
	 * from the backing device (when the cache device is dirty), because
	 * we don't know whether the bkeys the read request covers are all
	 * clean or not.
	 *
	 * When that happens, s->iop.status still has its initial value from
	 * before we submitted s->bio.bio.
	 */
	if (ret < 0) {
		BUG_ON(ret == -EINTR);
		if (s->d && s->d->c &&
		    !UUID_FLASH_ONLY(&s->d->c->uuids[s->d->id])) {
			dc = container_of(s->d, struct cached_dev, disk);
			if (dc && atomic_read(&dc->has_dirty))
				s->recoverable = false;
		}
		if (!s->iop.status)
			s->iop.status = BLK_STS_IOERR;
	}

	closure_return(cl);
}

/* Common code for the make_request functions */

static void request_endio(struct bio *bio)
{
	struct closure *cl = bio->bi_private;

	if (bio->bi_status) {
		struct search *s = container_of(cl, struct search, cl);
		s->iop.status = bio->bi_status;
		/* Only cache read errors are recoverable */
		s->recoverable = false;
	}

	bio_put(bio);
	closure_put(cl);
}

static void bio_complete(struct search *s)
{
	if (s->orig_bio) {
		generic_end_io_acct(s->d->disk->queue,
				    bio_data_dir(s->orig_bio),
				    &s->d->disk->part0, s->start_time);

		trace_bcache_request_end(s->d, s->orig_bio);
		s->orig_bio->bi_status = s->iop.status;
		bio_endio(s->orig_bio);
		s->orig_bio = NULL;
	}
}

646 | ||
a5ae4300 | 647 | static void do_bio_hook(struct search *s, struct bio *orig_bio) |
cafe5635 KO |
648 | { |
649 | struct bio *bio = &s->bio.bio; | |
cafe5635 | 650 | |
3a83f467 | 651 | bio_init(bio, NULL, 0); |
a5ae4300 | 652 | __bio_clone_fast(bio, orig_bio); |
cafe5635 KO |
653 | bio->bi_end_io = request_endio; |
654 | bio->bi_private = &s->cl; | |
ed9c47be | 655 | |
dac56212 | 656 | bio_cnt_set(bio, 3); |
cafe5635 KO |
657 | } |
658 | ||
static void search_free(struct closure *cl)
{
	struct search *s = container_of(cl, struct search, cl);

	if (s->iop.bio)
		bio_put(s->iop.bio);

	bio_complete(s);
	closure_debug_destroy(cl);
	mempool_free(s, s->d->c->search);
}

static inline struct search *search_alloc(struct bio *bio,
					  struct bcache_device *d)
{
	struct search *s;

	s = mempool_alloc(d->c->search, GFP_NOIO);

	closure_init(&s->cl, NULL);
	do_bio_hook(s, bio);

	s->orig_bio		= bio;
	s->cache_miss		= NULL;
	s->cache_missed		= 0;
	s->d			= d;
	s->recoverable		= 1;
	s->write		= op_is_write(bio_op(bio));
	s->read_dirty_data	= 0;
	s->start_time		= jiffies;

	s->iop.c		= d->c;
	s->iop.bio		= NULL;
	s->iop.inode		= d->id;
	s->iop.write_point	= hash_long((unsigned long) current, 16);
	s->iop.write_prio	= 0;
	s->iop.status		= 0;
	s->iop.flags		= 0;
	s->iop.flush_journal	= op_is_flush(bio->bi_opf);
	s->iop.wq		= bcache_wq;

	return s;
}

/* Cached devices */

static void cached_dev_bio_complete(struct closure *cl)
{
	struct search *s = container_of(cl, struct search, cl);
	struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);

	search_free(cl);
	cached_dev_put(dc);
}

/* Process reads */

static void cached_dev_cache_miss_done(struct closure *cl)
{
	struct search *s = container_of(cl, struct search, cl);

	if (s->iop.replace_collision)
		bch_mark_cache_miss_collision(s->iop.c, s->d);

	if (s->iop.bio)
		bio_free_pages(s->iop.bio);

	cached_dev_bio_complete(cl);
}

static void cached_dev_read_error(struct closure *cl)
{
	struct search *s = container_of(cl, struct search, cl);
	struct bio *bio = &s->bio.bio;

	/*
	 * If the read request hit dirty data (s->read_dirty_data is true),
	 * then recovering a failed read request from the cache device may
	 * return stale data. So read failure recovery is only permitted
	 * when the read request hit clean data in the cache device, or
	 * when a cache read race happened.
	 */
	if (s->recoverable && !s->read_dirty_data) {
		/* Retry from the backing device: */
		trace_bcache_read_retry(s->orig_bio);

		s->iop.status = 0;
		do_bio_hook(s, s->orig_bio);

		/* XXX: invalidate cache */

		closure_bio_submit(bio, cl);
	}

	continue_at(cl, cached_dev_cache_miss_done, NULL);
}

static void cached_dev_read_done(struct closure *cl)
{
	struct search *s = container_of(cl, struct search, cl);
	struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);

	/*
	 * We had a cache miss; cache_bio now contains data ready to be inserted
	 * into the cache.
	 *
	 * First, we copy the data we just read from cache_bio's bounce buffers
	 * to the buffers the original bio pointed to:
	 */

	if (s->iop.bio) {
		bio_reset(s->iop.bio);
		s->iop.bio->bi_iter.bi_sector = s->cache_miss->bi_iter.bi_sector;
		bio_copy_dev(s->iop.bio, s->cache_miss);
		s->iop.bio->bi_iter.bi_size = s->insert_bio_sectors << 9;
		bch_bio_map(s->iop.bio, NULL);

		bio_copy_data(s->cache_miss, s->iop.bio);

		bio_put(s->cache_miss);
		s->cache_miss = NULL;
	}

	if (verify(dc) && s->recoverable && !s->read_dirty_data)
		bch_data_verify(dc, s->orig_bio);

	bio_complete(s);

	if (s->iop.bio &&
	    !test_bit(CACHE_SET_STOPPING, &s->iop.c->flags)) {
		BUG_ON(!s->iop.replace);
		closure_call(&s->iop.cl, bch_data_insert, NULL, cl);
	}

	continue_at(cl, cached_dev_cache_miss_done, NULL);
}

static void cached_dev_read_done_bh(struct closure *cl)
{
	struct search *s = container_of(cl, struct search, cl);
	struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);

	bch_mark_cache_accounting(s->iop.c, s->d,
				  !s->cache_missed, s->iop.bypass);
	trace_bcache_read(s->orig_bio, !s->cache_miss, s->iop.bypass);

	if (s->iop.status)
		continue_at_nobarrier(cl, cached_dev_read_error, bcache_wq);
	else if (s->iop.bio || verify(dc))
		continue_at_nobarrier(cl, cached_dev_read_done, bcache_wq);
	else
		continue_at_nobarrier(cl, cached_dev_bio_complete, NULL);
}

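/*
 * A read missed (at least partially) in the cache: split off the missing
 * part, optionally round it up with readahead, and read it into a bounce
 * bio (cache_bio) that will later be both copied to the original bio and
 * inserted into the cache.
 */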
static int cached_dev_cache_miss(struct btree *b, struct search *s,
				 struct bio *bio, unsigned sectors)
{
	int ret = MAP_CONTINUE;
	unsigned reada = 0;
	struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);
	struct bio *miss, *cache_bio;

	s->cache_missed = 1;

	if (s->cache_miss || s->iop.bypass) {
		miss = bio_next_split(bio, sectors, GFP_NOIO, s->d->bio_split);
		ret = miss == bio ? MAP_DONE : MAP_CONTINUE;
		goto out_submit;
	}

	if (!(bio->bi_opf & REQ_RAHEAD) &&
	    !(bio->bi_opf & REQ_META) &&
	    s->iop.c->gc_stats.in_use < CUTOFF_CACHE_READA)
		reada = min_t(sector_t, dc->readahead >> 9,
			      get_capacity(bio->bi_disk) - bio_end_sector(bio));

	s->insert_bio_sectors = min(sectors, bio_sectors(bio) + reada);

	s->iop.replace_key = KEY(s->iop.inode,
				 bio->bi_iter.bi_sector + s->insert_bio_sectors,
				 s->insert_bio_sectors);

	ret = bch_btree_insert_check_key(b, &s->op, &s->iop.replace_key);
	if (ret)
		return ret;

	s->iop.replace = true;

	miss = bio_next_split(bio, sectors, GFP_NOIO, s->d->bio_split);

	/* btree_search_recurse()'s btree iterator is no good anymore */
	ret = miss == bio ? MAP_DONE : -EINTR;

	cache_bio = bio_alloc_bioset(GFP_NOWAIT,
			DIV_ROUND_UP(s->insert_bio_sectors, PAGE_SECTORS),
			dc->disk.bio_split);
	if (!cache_bio)
		goto out_submit;

	cache_bio->bi_iter.bi_sector	= miss->bi_iter.bi_sector;
	bio_copy_dev(cache_bio, miss);
	cache_bio->bi_iter.bi_size	= s->insert_bio_sectors << 9;

	cache_bio->bi_end_io	= request_endio;
	cache_bio->bi_private	= &s->cl;

	bch_bio_map(cache_bio, NULL);
	if (bch_bio_alloc_pages(cache_bio, __GFP_NOWARN|GFP_NOIO))
		goto out_put;

	if (reada)
		bch_mark_cache_readahead(s->iop.c, s->d);

	s->cache_miss	= miss;
	s->iop.bio	= cache_bio;
	bio_get(cache_bio);
	closure_bio_submit(cache_bio, &s->cl);

	return ret;
out_put:
	bio_put(cache_bio);
out_submit:
	miss->bi_end_io		= request_endio;
	miss->bi_private	= &s->cl;
	closure_bio_submit(miss, &s->cl);
	return ret;
}

static void cached_dev_read(struct cached_dev *dc, struct search *s)
{
	struct closure *cl = &s->cl;

	closure_call(&s->iop.cl, cache_lookup, NULL, cl);
	continue_at(cl, cached_dev_read_done_bh, NULL);
}

/* Process writes */

static void cached_dev_write_complete(struct closure *cl)
{
	struct search *s = container_of(cl, struct search, cl);
	struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);

	up_read_non_owner(&dc->writeback_lock);
	cached_dev_bio_complete(cl);
}

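/*
 * A write takes one of three paths: bypass (data goes straight to the
 * backing device and the cached range is invalidated), writeback (data
 * goes only to the cache and the keys are marked dirty), or writethrough
 * (data goes to the backing device while a clone is inserted into the
 * cache).
 */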
static void cached_dev_write(struct cached_dev *dc, struct search *s)
{
	struct closure *cl = &s->cl;
	struct bio *bio = &s->bio.bio;
	struct bkey start = KEY(dc->disk.id, bio->bi_iter.bi_sector, 0);
	struct bkey end = KEY(dc->disk.id, bio_end_sector(bio), 0);

	bch_keybuf_check_overlapping(&s->iop.c->moving_gc_keys, &start, &end);

	down_read_non_owner(&dc->writeback_lock);
	if (bch_keybuf_check_overlapping(&dc->writeback_keys, &start, &end)) {
		/*
		 * We overlap with some dirty data undergoing background
		 * writeback, force this write to writeback
		 */
		s->iop.bypass = false;
		s->iop.writeback = true;
	}

	/*
	 * Discards aren't _required_ to do anything, so skipping if
	 * check_overlapping returned true is ok
	 *
	 * But check_overlapping drops dirty keys for which io hasn't started,
	 * so we still want to call it.
	 */
	if (bio_op(bio) == REQ_OP_DISCARD)
		s->iop.bypass = true;

	if (should_writeback(dc, s->orig_bio,
			     cache_mode(dc),
			     s->iop.bypass)) {
		s->iop.bypass = false;
		s->iop.writeback = true;
	}

	if (s->iop.bypass) {
		s->iop.bio = s->orig_bio;
		bio_get(s->iop.bio);

		if ((bio_op(bio) != REQ_OP_DISCARD) ||
		    blk_queue_discard(bdev_get_queue(dc->bdev)))
			closure_bio_submit(bio, cl);
	} else if (s->iop.writeback) {
		bch_writeback_add(dc);
		s->iop.bio = bio;

		if (bio->bi_opf & REQ_PREFLUSH) {
			/* Also need to send a flush to the backing device */
			struct bio *flush = bio_alloc_bioset(GFP_NOIO, 0,
							     dc->disk.bio_split);

			bio_copy_dev(flush, bio);
			flush->bi_end_io = request_endio;
			flush->bi_private = cl;
			flush->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;

			closure_bio_submit(flush, cl);
		}
	} else {
		s->iop.bio = bio_clone_fast(bio, GFP_NOIO, dc->disk.bio_split);

		closure_bio_submit(bio, cl);
	}

	closure_call(&s->iop.cl, bch_data_insert, NULL, cl);
	continue_at(cl, cached_dev_write_complete, NULL);
}

static void cached_dev_nodata(struct closure *cl)
{
	struct search *s = container_of(cl, struct search, cl);
	struct bio *bio = &s->bio.bio;

	if (s->iop.flush_journal)
		bch_journal_meta(s->iop.c, cl);

	/* If it's a flush, we send the flush to the backing device too */
	closure_bio_submit(bio, cl);

	continue_at(cl, cached_dev_bio_complete, NULL);
}

/* Cached devices - read & write stuff */

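/*
 * make_request entry point for cached devices: start accounting, remap
 * the bio past the backing device's superblock, then hand it to the read
 * or write path - or pass it straight through if we couldn't take a
 * reference on the cached_dev.
 */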
static blk_qc_t cached_dev_make_request(struct request_queue *q,
					struct bio *bio)
{
	struct search *s;
	struct bcache_device *d = bio->bi_disk->private_data;
	struct cached_dev *dc = container_of(d, struct cached_dev, disk);
	int rw = bio_data_dir(bio);

	atomic_set(&dc->backing_idle, 0);
	generic_start_io_acct(q, rw, bio_sectors(bio), &d->disk->part0);

	bio_set_dev(bio, dc->bdev);
	bio->bi_iter.bi_sector += dc->sb.data_offset;

	if (cached_dev_get(dc)) {
		s = search_alloc(bio, d);
		trace_bcache_request_start(s->d, bio);

		if (!bio->bi_iter.bi_size) {
			/*
			 * can't call bch_journal_meta from under
			 * generic_make_request
			 */
			continue_at_nobarrier(&s->cl,
					      cached_dev_nodata,
					      bcache_wq);
		} else {
			s->iop.bypass = check_should_bypass(dc, bio);

			if (rw)
				cached_dev_write(dc, s);
			else
				cached_dev_read(dc, s);
		}
	} else {
		if ((bio_op(bio) == REQ_OP_DISCARD) &&
		    !blk_queue_discard(bdev_get_queue(dc->bdev)))
			bio_endio(bio);
		else
			generic_make_request(bio);
	}

	return BLK_QC_T_NONE;
}

static int cached_dev_ioctl(struct bcache_device *d, fmode_t mode,
			    unsigned int cmd, unsigned long arg)
{
	struct cached_dev *dc = container_of(d, struct cached_dev, disk);
	return __blkdev_driver_ioctl(dc->bdev, mode, cmd, arg);
}

static int cached_dev_congested(void *data, int bits)
{
	struct bcache_device *d = data;
	struct cached_dev *dc = container_of(d, struct cached_dev, disk);
	struct request_queue *q = bdev_get_queue(dc->bdev);
	int ret = 0;

	if (bdi_congested(q->backing_dev_info, bits))
		return 1;

	if (cached_dev_get(dc)) {
		unsigned i;
		struct cache *ca;

		for_each_cache(ca, d->c, i) {
			q = bdev_get_queue(ca->bdev);
			ret |= bdi_congested(q->backing_dev_info, bits);
		}

		cached_dev_put(dc);
	}

	return ret;
}

void bch_cached_dev_request_init(struct cached_dev *dc)
{
	struct gendisk *g = dc->disk.disk;

	g->queue->make_request_fn = cached_dev_make_request;
	g->queue->backing_dev_info->congested_fn = cached_dev_congested;
	dc->disk.cache_miss = cached_dev_cache_miss;
	dc->disk.ioctl = cached_dev_ioctl;
}

/* Flash backed devices */

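/*
 * Flash-only volumes have no backing device, so a cache miss is just a
 * hole in the keyspace: flash_dev_cache_miss() zero-fills the unbacked
 * part of the bio instead of reading from anywhere.
 */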
static int flash_dev_cache_miss(struct btree *b, struct search *s,
				struct bio *bio, unsigned sectors)
{
	unsigned bytes = min(sectors, bio_sectors(bio)) << 9;

	swap(bio->bi_iter.bi_size, bytes);
	zero_fill_bio(bio);
	swap(bio->bi_iter.bi_size, bytes);

	bio_advance(bio, bytes);

	if (!bio->bi_iter.bi_size)
		return MAP_DONE;

	return MAP_CONTINUE;
}

static void flash_dev_nodata(struct closure *cl)
{
	struct search *s = container_of(cl, struct search, cl);

	if (s->iop.flush_journal)
		bch_journal_meta(s->iop.c, cl);

	continue_at(cl, search_free, NULL);
}

static blk_qc_t flash_dev_make_request(struct request_queue *q,
				       struct bio *bio)
{
	struct search *s;
	struct closure *cl;
	struct bcache_device *d = bio->bi_disk->private_data;
	int rw = bio_data_dir(bio);

	generic_start_io_acct(q, rw, bio_sectors(bio), &d->disk->part0);

	s = search_alloc(bio, d);
	cl = &s->cl;
	bio = &s->bio.bio;

	trace_bcache_request_start(s->d, bio);

	if (!bio->bi_iter.bi_size) {
		/*
		 * can't call bch_journal_meta from under
		 * generic_make_request
		 */
		continue_at_nobarrier(&s->cl,
				      flash_dev_nodata,
				      bcache_wq);
		return BLK_QC_T_NONE;
	} else if (rw) {
		bch_keybuf_check_overlapping(&s->iop.c->moving_gc_keys,
					&KEY(d->id, bio->bi_iter.bi_sector, 0),
					&KEY(d->id, bio_end_sector(bio), 0));

		s->iop.bypass		= (bio_op(bio) == REQ_OP_DISCARD) != 0;
		s->iop.writeback	= true;
		s->iop.bio		= bio;

		closure_call(&s->iop.cl, bch_data_insert, NULL, cl);
	} else {
		closure_call(&s->iop.cl, cache_lookup, NULL, cl);
	}

	continue_at(cl, search_free, NULL);
	return BLK_QC_T_NONE;
}

static int flash_dev_ioctl(struct bcache_device *d, fmode_t mode,
			   unsigned int cmd, unsigned long arg)
{
	return -ENOTTY;
}

static int flash_dev_congested(void *data, int bits)
{
	struct bcache_device *d = data;
	struct request_queue *q;
	struct cache *ca;
	unsigned i;
	int ret = 0;

	for_each_cache(ca, d->c, i) {
		q = bdev_get_queue(ca->bdev);
		ret |= bdi_congested(q->backing_dev_info, bits);
	}

	return ret;
}

void bch_flash_dev_request_init(struct bcache_device *d)
{
	struct gendisk *g = d->disk;

	g->queue->make_request_fn = flash_dev_make_request;
	g->queue->backing_dev_info->congested_fn = flash_dev_congested;
	d->cache_miss = flash_dev_cache_miss;
	d->ioctl = flash_dev_ioctl;
}

void bch_request_exit(void)
{
	if (bch_search_cache)
		kmem_cache_destroy(bch_search_cache);
}

int __init bch_request_init(void)
{
	bch_search_cache = KMEM_CACHE(search, 0);
	if (!bch_search_cache)
		return -ENOMEM;

	return 0;
}