// SPDX-License-Identifier: GPL-2.0
/*
 * Main bcache entry point - handle a read or a write request and decide what to
 * do with it; the make_request functions are called by the block layer.
 *
 * Copyright 2010, 2011 Kent Overstreet <kent.overstreet@gmail.com>
 * Copyright 2012 Google, Inc.
 */

#include "bcache.h"
#include "btree.h"
#include "debug.h"
#include "request.h"
#include "writeback.h"

#include <linux/module.h>
#include <linux/hash.h>
#include <linux/random.h>
#include <linux/backing-dev.h>

#include <trace/events/bcache.h>

#define CUTOFF_CACHE_ADD	95
#define CUTOFF_CACHE_READA	90

struct kmem_cache *bch_search_cache;

static void bch_data_insert_start(struct closure *);

static unsigned cache_mode(struct cached_dev *dc, struct bio *bio)
{
        return BDEV_CACHE_MODE(&dc->sb);
}

static bool verify(struct cached_dev *dc, struct bio *bio)
{
        return dc->verify;
}

static void bio_csum(struct bio *bio, struct bkey *k)
{
        struct bio_vec bv;
        struct bvec_iter iter;
        uint64_t csum = 0;

        bio_for_each_segment(bv, bio, iter) {
                void *d = kmap(bv.bv_page) + bv.bv_offset;
                csum = bch_crc64_update(csum, d, bv.bv_len);
                kunmap(bv.bv_page);
        }

        k->ptr[KEY_PTRS(k)] = csum & (~0ULL >> 1);
}
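
/*
 * Illustrative sketch (not part of the original driver): bio_csum()
 * stores the checksum in the u64 slot immediately after the key's last
 * pointer, masked down to 63 bits. A reader could verify data against
 * such a key by recomputing the checksum the same way; the function
 * name below is hypothetical.
 */
#if 0
static bool bio_csum_matches(struct bio *bio, struct bkey *k)
{
        struct bio_vec bv;
        struct bvec_iter iter;
        uint64_t csum = 0;

        bio_for_each_segment(bv, bio, iter) {
                void *d = kmap(bv.bv_page) + bv.bv_offset;
                csum = bch_crc64_update(csum, d, bv.bv_len);
                kunmap(bv.bv_page);
        }

        return (csum & (~0ULL >> 1)) == k->ptr[KEY_PTRS(k)];
}
#endif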

/* Insert data into cache */

static void bch_data_insert_keys(struct closure *cl)
{
        struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);
        atomic_t *journal_ref = NULL;
        struct bkey *replace_key = op->replace ? &op->replace_key : NULL;
        int ret;

        /*
         * If we're looping, might already be waiting on
         * another journal write - can't wait on more than one journal write at
         * a time
         *
         * XXX: this looks wrong
         */
#if 0
        while (atomic_read(&s->cl.remaining) & CLOSURE_WAITING)
                closure_sync(&s->cl);
#endif

        if (!op->replace)
                journal_ref = bch_journal(op->c, &op->insert_keys,
                                          op->flush_journal ? cl : NULL);

        ret = bch_btree_insert(op->c, &op->insert_keys,
                               journal_ref, replace_key);
        if (ret == -ESRCH) {
                op->replace_collision = true;
        } else if (ret) {
                op->status = BLK_STS_RESOURCE;
                op->insert_data_done = true;
        }

        if (journal_ref)
                atomic_dec_bug(journal_ref);

        if (!op->insert_data_done) {
                continue_at(cl, bch_data_insert_start, op->wq);
                return;
        }

        bch_keylist_free(&op->insert_keys);
        closure_return(cl);
}

static int bch_keylist_realloc(struct keylist *l, unsigned u64s,
                               struct cache_set *c)
{
        size_t oldsize = bch_keylist_nkeys(l);
        size_t newsize = oldsize + u64s;

        /*
         * The journalling code doesn't handle the case where the keys to
         * insert are bigger than an empty write: if we just return -ENOMEM
         * here, bio_insert() and bio_invalidate() will insert the keys created
         * so far and finish the rest when the keylist is empty.
         */
        if (newsize * sizeof(uint64_t) > block_bytes(c) - sizeof(struct jset))
                return -ENOMEM;

        return __bch_keylist_realloc(l, u64s);
}
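
/*
 * Worked example (illustrative): with a hypothetical 4096-byte btree
 * block, an empty journal write has 4096 - sizeof(struct jset) bytes of
 * headroom for keys, i.e. on the order of 500 u64s. The sketch below,
 * under those assumptions, computes the cap the check above enforces.
 */
#if 0
static size_t keylist_max_u64s(struct cache_set *c)
{
        return (block_bytes(c) - sizeof(struct jset)) / sizeof(uint64_t);
}
#endif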

static void bch_data_invalidate(struct closure *cl)
{
        struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);
        struct bio *bio = op->bio;

        pr_debug("invalidating %i sectors from %llu",
                 bio_sectors(bio), (uint64_t) bio->bi_iter.bi_sector);

        while (bio_sectors(bio)) {
                unsigned sectors = min(bio_sectors(bio),
                                       1U << (KEY_SIZE_BITS - 1));

                if (bch_keylist_realloc(&op->insert_keys, 2, op->c))
                        goto out;

                bio->bi_iter.bi_sector += sectors;
                bio->bi_iter.bi_size -= sectors << 9;

                bch_keylist_add(&op->insert_keys,
                                &KEY(op->inode, bio->bi_iter.bi_sector, sectors));
        }

        op->insert_data_done = true;
        bio_put(bio);
out:
        continue_at(cl, bch_data_insert_keys, op->wq);
}

static void bch_data_insert_error(struct closure *cl)
{
        struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);

        /*
         * Our data write just errored, which means we've got a bunch of keys to
         * insert that point to data that wasn't successfully written.
         *
         * We don't have to insert those keys but we still have to invalidate
         * that region of the cache - so, if we just strip off all the pointers
         * from the keys we'll accomplish just that.
         */

        struct bkey *src = op->insert_keys.keys, *dst = op->insert_keys.keys;

        while (src != op->insert_keys.top) {
                struct bkey *n = bkey_next(src);

                SET_KEY_PTRS(src, 0);
                memmove(dst, src, bkey_bytes(src));

                dst = bkey_next(dst);
                src = n;
        }

        op->insert_keys.top = dst;

        bch_data_insert_keys(cl);
}

static void bch_data_insert_endio(struct bio *bio)
{
        struct closure *cl = bio->bi_private;
        struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);

        if (bio->bi_status) {
                /* TODO: We could try to recover from this. */
                if (op->writeback)
                        op->status = bio->bi_status;
                else if (!op->replace)
                        set_closure_fn(cl, bch_data_insert_error, op->wq);
                else
                        set_closure_fn(cl, NULL, NULL);
        }

        bch_bbio_endio(op->c, bio, bio->bi_status, "writing data to cache");
}

static void bch_data_insert_start(struct closure *cl)
{
        struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);
        struct bio *bio = op->bio, *n;

        if (op->bypass)
                return bch_data_invalidate(cl);

        if (atomic_sub_return(bio_sectors(bio), &op->c->sectors_to_gc) < 0)
                wake_up_gc(op->c);

        /*
         * Journal writes are marked REQ_PREFLUSH; if the original write was a
         * flush, it'll wait on the journal write.
         */
        bio->bi_opf &= ~(REQ_PREFLUSH|REQ_FUA);

        do {
                unsigned i;
                struct bkey *k;
                struct bio_set *split = op->c->bio_split;
                /* 2 u64s for the key itself, 1 for the device pointer and
                 * 1 for the checksum */
                if (bch_keylist_realloc(&op->insert_keys,
                                        3 + (op->csum ? 1 : 0),
                                        op->c)) {
                        continue_at(cl, bch_data_insert_keys, op->wq);
                        return;
                }

                k = op->insert_keys.top;
                bkey_init(k);
                SET_KEY_INODE(k, op->inode);
                SET_KEY_OFFSET(k, bio->bi_iter.bi_sector);

                if (!bch_alloc_sectors(op->c, k, bio_sectors(bio),
                                       op->write_point, op->write_prio,
                                       op->writeback))
                        goto err;

                n = bio_next_split(bio, KEY_SIZE(k), GFP_NOIO, split);

                n->bi_end_io = bch_data_insert_endio;
                n->bi_private = cl;

                if (op->writeback) {
                        SET_KEY_DIRTY(k, true);

                        for (i = 0; i < KEY_PTRS(k); i++)
                                SET_GC_MARK(PTR_BUCKET(op->c, k, i),
                                            GC_MARK_DIRTY);
                }

                SET_KEY_CSUM(k, op->csum);
                if (KEY_CSUM(k))
                        bio_csum(n, k);

                trace_bcache_cache_insert(k);
                bch_keylist_push(&op->insert_keys);

                bio_set_op_attrs(n, REQ_OP_WRITE, 0);
                bch_submit_bbio(n, op->c, k, 0);
        } while (n != bio);

        op->insert_data_done = true;
        continue_at(cl, bch_data_insert_keys, op->wq);
        return;
err:
        /* bch_alloc_sectors() blocks if op->writeback is true */
        BUG_ON(op->writeback);

        /*
         * But if it's not a writeback write we'd rather just bail out if
         * there aren't any buckets ready to write to - it might take a while
         * and we might be starving btree writes for gc or something.
         */

        if (!op->replace) {
                /*
                 * Writethrough write: We can't complete the write until we've
                 * updated the index. But we don't want to delay the write while
                 * we wait for buckets to be freed up, so just invalidate the
                 * rest of the write.
                 */
                op->bypass = true;
                return bch_data_invalidate(cl);
        } else {
                /*
                 * From a cache miss, we can just insert the keys for the data
                 * we have written or bail out if we didn't do anything.
                 */
                op->insert_data_done = true;
                bio_put(bio);

                if (!bch_keylist_empty(&op->insert_keys))
                        continue_at(cl, bch_data_insert_keys, op->wq);
                else
                        closure_return(cl);
        }
}

/**
 * bch_data_insert - stick some data in the cache
 *
 * This is the starting point for any data to end up in a cache device; it could
 * be from a normal write, or a writeback write, or a write to a flash only
 * volume - it's also used by the moving garbage collector to compact data in
 * mostly empty buckets.
 *
 * It first writes the data to the cache, creating a list of keys to be inserted
 * (if the data had to be fragmented there will be multiple keys); after the
 * data is written it calls bch_journal, and after the keys have been added to
 * the next journal write they're inserted into the btree.
 *
 * It inserts the data in op->bio; bi_sector is used for the key offset,
 * and op->inode is used for the key inode.
 *
 * If op->bypass is true, instead of inserting the data it invalidates the
 * region of the cache represented by op->bio and op->inode.
 */
void bch_data_insert(struct closure *cl)
{
        struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);

        trace_bcache_write(op->c, op->inode, op->bio,
                           op->writeback, op->bypass);

        bch_keylist_init(&op->insert_keys);
        bio_get(op->bio);
        bch_data_insert_start(cl);
}
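
/*
 * Minimal caller sketch (illustrative, not part of the original file),
 * modeled on cached_dev_write() and flash_dev_make_request() below: the
 * op is embedded next to a parent closure and the remaining fields are
 * assumed already zeroed (as with the mempool-backed struct search).
 */
#if 0
static void example_data_insert(struct data_insert_op *op,
                                struct cache_set *c, struct bio *bio,
                                unsigned inode, struct closure *parent)
{
        op->c = c;
        op->bio = bio;
        op->inode = inode;
        op->write_point = hash_long((unsigned long) current, 16);
        op->wq = bcache_wq;

        /* runs asynchronously; parent is released when the insert finishes */
        closure_call(&op->cl, bch_data_insert, NULL, parent);
}
#endif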

/* Congested? */

unsigned bch_get_congested(struct cache_set *c)
{
        int i;
        long rand;

        if (!c->congested_read_threshold_us &&
            !c->congested_write_threshold_us)
                return 0;

        i = (local_clock_us() - c->congested_last_us) / 1024;
        if (i < 0)
                return 0;

        i += atomic_read(&c->congested);
        if (i >= 0)
                return 0;

        i += CONGESTED_MAX;

        if (i > 0)
                i = fract_exp_two(i, 6);

        rand = get_random_int();
        i -= bitmap_weight(&rand, BITS_PER_LONG);

        return i > 0 ? i : 1;
}
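
/*
 * Worked example (illustrative, assuming CONGESTED_MAX is 1024 and
 * fract_exp_two() computes a base-2 fractional power as in util.h):
 * c->congested is driven negative while I/O latencies exceed the
 * configured thresholds and decays back toward zero over time. With a
 * recent sample (elapsed term 0) and c->congested == -100, we get
 * i = -100 + 1024 = 924; fract_exp_two(924, 6) is 2^(924/64) with six
 * fraction bits: 924 >> 6 == 14, remainder 28, so 16384 + 16384 * 28 / 64
 * == 23552. Subtracting the popcount of a random long (~32 on average,
 * a cheap dither) leaves ~23520: the sector threshold a task's
 * sequential I/O is compared against in check_should_bypass(). Heavier
 * congestion yields a smaller threshold, so more I/O bypasses the cache.
 */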

static void add_sequential(struct task_struct *t)
{
        ewma_add(t->sequential_io_avg,
                 t->sequential_io, 8, 0);

        t->sequential_io = 0;
}
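
/*
 * Worked example (illustrative, assuming ewma_add() implements
 * avg = (avg * (weight - 1) + val) / weight as in util.h): with weight
 * 8, an old sequential_io_avg of 1 MiB and a task that just completed a
 * 9 MiB sequential run, the new average is (1M * 7 + 9M) / 8 = 2 MiB.
 * sequential_io itself is reset so the next run accumulates from zero.
 */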

static struct hlist_head *iohash(struct cached_dev *dc, uint64_t k)
{
        return &dc->io_hash[hash_64(k, RECENT_IO_BITS)];
}

static bool check_should_bypass(struct cached_dev *dc, struct bio *bio)
{
        struct cache_set *c = dc->disk.c;
        unsigned mode = cache_mode(dc, bio);
        unsigned sectors, congested = bch_get_congested(c);
        struct task_struct *task = current;
        struct io *i;

        if (test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags) ||
            c->gc_stats.in_use > CUTOFF_CACHE_ADD ||
            (bio_op(bio) == REQ_OP_DISCARD))
                goto skip;

        if (mode == CACHE_MODE_NONE ||
            (mode == CACHE_MODE_WRITEAROUND &&
             op_is_write(bio_op(bio))))
                goto skip;

        if (bio->bi_iter.bi_sector & (c->sb.block_size - 1) ||
            bio_sectors(bio) & (c->sb.block_size - 1)) {
                pr_debug("skipping unaligned io");
                goto skip;
        }

        if (bypass_torture_test(dc)) {
                if ((get_random_int() & 3) == 3)
                        goto skip;
                else
                        goto rescale;
        }

        if (!congested && !dc->sequential_cutoff)
                goto rescale;

        spin_lock(&dc->io_lock);

        hlist_for_each_entry(i, iohash(dc, bio->bi_iter.bi_sector), hash)
                if (i->last == bio->bi_iter.bi_sector &&
                    time_before(jiffies, i->jiffies))
                        goto found;

        i = list_first_entry(&dc->io_lru, struct io, lru);

        add_sequential(task);
        i->sequential = 0;
found:
        if (i->sequential + bio->bi_iter.bi_size > i->sequential)
                i->sequential += bio->bi_iter.bi_size;

        i->last = bio_end_sector(bio);
        i->jiffies = jiffies + msecs_to_jiffies(5000);
        task->sequential_io = i->sequential;

        hlist_del(&i->hash);
        hlist_add_head(&i->hash, iohash(dc, i->last));
        list_move_tail(&i->lru, &dc->io_lru);

        spin_unlock(&dc->io_lock);

        sectors = max(task->sequential_io,
                      task->sequential_io_avg) >> 9;

        if (dc->sequential_cutoff &&
            sectors >= dc->sequential_cutoff >> 9) {
                trace_bcache_bypass_sequential(bio);
                goto skip;
        }

        if (congested && sectors >= congested) {
                trace_bcache_bypass_congested(bio);
                goto skip;
        }

rescale:
        bch_rescale_priorities(c, bio_sectors(bio));
        return false;
skip:
        bch_mark_sectors_bypassed(c, dc, bio_sectors(bio));
        return true;
}
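
/*
 * Worked example (illustrative, assuming the common 4 MiB default for
 * sequential_cutoff): a task streams 16 MiB in 1 MiB bios. Each bio
 * finds the struct io whose ->last matches its start sector, so
 * i->sequential grows by 1 MiB per bio; once max(task->sequential_io,
 * task->sequential_io_avg) >> 9 reaches 8192 sectors (4 MiB), the
 * stream is traced as sequential and bypasses the cache, while short
 * random I/O never accumulates enough to hit the cutoff.
 */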

/* Cache lookup */

struct search {
        /* Stack frame for bio_complete */
        struct closure cl;

        struct bbio bio;
        struct bio *orig_bio;
        struct bio *cache_miss;
        struct bcache_device *d;

        unsigned insert_bio_sectors;
        unsigned recoverable:1;
        unsigned write:1;
        unsigned read_dirty_data:1;

        unsigned long start_time;

        struct btree_op op;
        struct data_insert_op iop;
};

static void bch_cache_read_endio(struct bio *bio)
{
        struct bbio *b = container_of(bio, struct bbio, bio);
        struct closure *cl = bio->bi_private;
        struct search *s = container_of(cl, struct search, cl);

        /*
         * If the bucket was reused while our bio was in flight, we might have
         * read the wrong data. Set s->iop.status but not bio->bi_status, so it
         * doesn't get counted against the cache device, but we'll still reread
         * the data from the backing device.
         */

        if (bio->bi_status)
                s->iop.status = bio->bi_status;
        else if (!KEY_DIRTY(&b->key) &&
                 ptr_stale(s->iop.c, &b->key, 0)) {
                atomic_long_inc(&s->iop.c->cache_read_races);
                s->iop.status = BLK_STS_IOERR;
        }

        bch_bbio_endio(s->iop.c, bio, bio->bi_status, "reading from cache");
}

/*
 * Read from a single key, handling the initial cache miss if the key starts in
 * the middle of the bio
 */
static int cache_lookup_fn(struct btree_op *op, struct btree *b, struct bkey *k)
{
        struct search *s = container_of(op, struct search, op);
        struct bio *n, *bio = &s->bio.bio;
        struct bkey *bio_key;
        unsigned ptr;

        if (bkey_cmp(k, &KEY(s->iop.inode, bio->bi_iter.bi_sector, 0)) <= 0)
                return MAP_CONTINUE;

        if (KEY_INODE(k) != s->iop.inode ||
            KEY_START(k) > bio->bi_iter.bi_sector) {
                unsigned bio_sectors = bio_sectors(bio);
                unsigned sectors = KEY_INODE(k) == s->iop.inode
                        ? min_t(uint64_t, INT_MAX,
                                KEY_START(k) - bio->bi_iter.bi_sector)
                        : INT_MAX;

                int ret = s->d->cache_miss(b, s, bio, sectors);
                if (ret != MAP_CONTINUE)
                        return ret;

                /* if this was a complete miss we shouldn't get here */
                BUG_ON(bio_sectors <= sectors);
        }

        if (!KEY_SIZE(k))
                return MAP_CONTINUE;

        /* XXX: figure out best pointer - for multiple cache devices */
        ptr = 0;

        PTR_BUCKET(b->c, k, ptr)->prio = INITIAL_PRIO;

        if (KEY_DIRTY(k))
                s->read_dirty_data = true;

        n = bio_next_split(bio, min_t(uint64_t, INT_MAX,
                                      KEY_OFFSET(k) - bio->bi_iter.bi_sector),
                           GFP_NOIO, s->d->bio_split);

        bio_key = &container_of(n, struct bbio, bio)->key;
        bch_bkey_copy_single_ptr(bio_key, k, ptr);

        bch_cut_front(&KEY(s->iop.inode, n->bi_iter.bi_sector, 0), bio_key);
        bch_cut_back(&KEY(s->iop.inode, bio_end_sector(n), 0), bio_key);

        n->bi_end_io = bch_cache_read_endio;
        n->bi_private = &s->cl;

        /*
         * The bucket we're reading from might be reused while our bio
         * is in flight, and we could then end up reading the wrong
         * data.
         *
         * We guard against this by checking (in bch_cache_read_endio())
         * if the pointer is stale again; if so, we treat it as an error
         * and reread from the backing device (but we don't pass that
         * error up anywhere).
         */

        __bch_submit_bbio(n, b->c);
        return n == bio ? MAP_DONE : MAP_CONTINUE;
}

static void cache_lookup(struct closure *cl)
{
        struct search *s = container_of(cl, struct search, iop.cl);
        struct bio *bio = &s->bio.bio;
        int ret;

        bch_btree_op_init(&s->op, -1);

        ret = bch_btree_map_keys(&s->op, s->iop.c,
                                 &KEY(s->iop.inode, bio->bi_iter.bi_sector, 0),
                                 cache_lookup_fn, MAP_END_KEY);
        if (ret == -EAGAIN) {
                continue_at(cl, cache_lookup, bcache_wq);
                return;
        }

        closure_return(cl);
}

/* Common code for the make_request functions */

static void request_endio(struct bio *bio)
{
        struct closure *cl = bio->bi_private;

        if (bio->bi_status) {
                struct search *s = container_of(cl, struct search, cl);
                s->iop.status = bio->bi_status;
                /* Only cache read errors are recoverable */
                s->recoverable = false;
        }

        bio_put(bio);
        closure_put(cl);
}

static void bio_complete(struct search *s)
{
        if (s->orig_bio) {
                struct request_queue *q = s->orig_bio->bi_disk->queue;
                generic_end_io_acct(q, bio_data_dir(s->orig_bio),
                                    &s->d->disk->part0, s->start_time);

                trace_bcache_request_end(s->d, s->orig_bio);
                s->orig_bio->bi_status = s->iop.status;
                bio_endio(s->orig_bio);
                s->orig_bio = NULL;
        }
}

static void do_bio_hook(struct search *s, struct bio *orig_bio)
{
        struct bio *bio = &s->bio.bio;

        bio_init(bio, NULL, 0);
        __bio_clone_fast(bio, orig_bio);
        bio->bi_end_io = request_endio;
        bio->bi_private = &s->cl;

        bio_cnt_set(bio, 3);
}

static void search_free(struct closure *cl)
{
        struct search *s = container_of(cl, struct search, cl);
        bio_complete(s);

        if (s->iop.bio)
                bio_put(s->iop.bio);

        closure_debug_destroy(cl);
        mempool_free(s, s->d->c->search);
}

static inline struct search *search_alloc(struct bio *bio,
                                          struct bcache_device *d)
{
        struct search *s;

        s = mempool_alloc(d->c->search, GFP_NOIO);

        closure_init(&s->cl, NULL);
        do_bio_hook(s, bio);

        s->orig_bio = bio;
        s->cache_miss = NULL;
        s->d = d;
        s->recoverable = 1;
        s->write = op_is_write(bio_op(bio));
        s->read_dirty_data = 0;
        s->start_time = jiffies;

        s->iop.c = d->c;
        s->iop.bio = NULL;
        s->iop.inode = d->id;
        s->iop.write_point = hash_long((unsigned long) current, 16);
        s->iop.write_prio = 0;
        s->iop.status = 0;
        s->iop.flags = 0;
        s->iop.flush_journal = op_is_flush(bio->bi_opf);
        s->iop.wq = bcache_wq;

        return s;
}

/* Cached devices */

static void cached_dev_bio_complete(struct closure *cl)
{
        struct search *s = container_of(cl, struct search, cl);
        struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);

        search_free(cl);
        cached_dev_put(dc);
}

/* Process reads */

static void cached_dev_cache_miss_done(struct closure *cl)
{
        struct search *s = container_of(cl, struct search, cl);

        if (s->iop.replace_collision)
                bch_mark_cache_miss_collision(s->iop.c, s->d);

        if (s->iop.bio)
                bio_free_pages(s->iop.bio);

        cached_dev_bio_complete(cl);
}

static void cached_dev_read_error(struct closure *cl)
{
        struct search *s = container_of(cl, struct search, cl);
        struct bio *bio = &s->bio.bio;

        if (s->recoverable) {
                /* Retry from the backing device: */
                trace_bcache_read_retry(s->orig_bio);

                s->iop.status = 0;
                do_bio_hook(s, s->orig_bio);

                /* XXX: invalidate cache */

                closure_bio_submit(bio, cl);
        }

        continue_at(cl, cached_dev_cache_miss_done, NULL);
}

static void cached_dev_read_done(struct closure *cl)
{
        struct search *s = container_of(cl, struct search, cl);
        struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);

        /*
         * We had a cache miss; cache_bio now contains data ready to be inserted
         * into the cache.
         *
         * First, we copy the data we just read from cache_bio's bounce buffers
         * to the buffers the original bio pointed to:
         */

        if (s->iop.bio) {
                bio_reset(s->iop.bio);
                s->iop.bio->bi_iter.bi_sector = s->cache_miss->bi_iter.bi_sector;
                bio_copy_dev(s->iop.bio, s->cache_miss);
                s->iop.bio->bi_iter.bi_size = s->insert_bio_sectors << 9;
                bch_bio_map(s->iop.bio, NULL);

                bio_copy_data(s->cache_miss, s->iop.bio);

                bio_put(s->cache_miss);
                s->cache_miss = NULL;
        }

        if (verify(dc, &s->bio.bio) && s->recoverable && !s->read_dirty_data)
                bch_data_verify(dc, s->orig_bio);

        bio_complete(s);

        if (s->iop.bio &&
            !test_bit(CACHE_SET_STOPPING, &s->iop.c->flags)) {
                BUG_ON(!s->iop.replace);
                closure_call(&s->iop.cl, bch_data_insert, NULL, cl);
        }

        continue_at(cl, cached_dev_cache_miss_done, NULL);
}

static void cached_dev_read_done_bh(struct closure *cl)
{
        struct search *s = container_of(cl, struct search, cl);
        struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);

        bch_mark_cache_accounting(s->iop.c, s->d,
                                  !s->cache_miss, s->iop.bypass);
        trace_bcache_read(s->orig_bio, !s->cache_miss, s->iop.bypass);

        if (s->iop.status)
                continue_at_nobarrier(cl, cached_dev_read_error, bcache_wq);
        else if (s->iop.bio || verify(dc, &s->bio.bio))
                continue_at_nobarrier(cl, cached_dev_read_done, bcache_wq);
        else
                continue_at_nobarrier(cl, cached_dev_bio_complete, NULL);
}

static int cached_dev_cache_miss(struct btree *b, struct search *s,
                                 struct bio *bio, unsigned sectors)
{
        int ret = MAP_CONTINUE;
        unsigned reada = 0;
        struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);
        struct bio *miss, *cache_bio;

        if (s->cache_miss || s->iop.bypass) {
                miss = bio_next_split(bio, sectors, GFP_NOIO, s->d->bio_split);
                ret = miss == bio ? MAP_DONE : MAP_CONTINUE;
                goto out_submit;
        }

        if (!(bio->bi_opf & REQ_RAHEAD) &&
            !(bio->bi_opf & REQ_META) &&
            s->iop.c->gc_stats.in_use < CUTOFF_CACHE_READA)
                reada = min_t(sector_t, dc->readahead >> 9,
                              get_capacity(bio->bi_disk) - bio_end_sector(bio));

        s->insert_bio_sectors = min(sectors, bio_sectors(bio) + reada);

        s->iop.replace_key = KEY(s->iop.inode,
                                 bio->bi_iter.bi_sector + s->insert_bio_sectors,
                                 s->insert_bio_sectors);

        ret = bch_btree_insert_check_key(b, &s->op, &s->iop.replace_key);
        if (ret)
                return ret;

        s->iop.replace = true;

        miss = bio_next_split(bio, sectors, GFP_NOIO, s->d->bio_split);

        /* btree_search_recurse()'s btree iterator is no good anymore */
        ret = miss == bio ? MAP_DONE : -EINTR;

        cache_bio = bio_alloc_bioset(GFP_NOWAIT,
                        DIV_ROUND_UP(s->insert_bio_sectors, PAGE_SECTORS),
                        dc->disk.bio_split);
        if (!cache_bio)
                goto out_submit;

        cache_bio->bi_iter.bi_sector = miss->bi_iter.bi_sector;
        bio_copy_dev(cache_bio, miss);
        cache_bio->bi_iter.bi_size = s->insert_bio_sectors << 9;

        cache_bio->bi_end_io = request_endio;
        cache_bio->bi_private = &s->cl;

        bch_bio_map(cache_bio, NULL);
        if (bio_alloc_pages(cache_bio, __GFP_NOWARN|GFP_NOIO))
                goto out_put;

        if (reada)
                bch_mark_cache_readahead(s->iop.c, s->d);

        s->cache_miss = miss;
        s->iop.bio = cache_bio;
        bio_get(cache_bio);
        closure_bio_submit(cache_bio, &s->cl);

        return ret;
out_put:
        bio_put(cache_bio);
out_submit:
        miss->bi_end_io = request_endio;
        miss->bi_private = &s->cl;
        closure_bio_submit(miss, &s->cl);
        return ret;
}

static void cached_dev_read(struct cached_dev *dc, struct search *s)
{
        struct closure *cl = &s->cl;

        closure_call(&s->iop.cl, cache_lookup, NULL, cl);
        continue_at(cl, cached_dev_read_done_bh, NULL);
}

/* Process writes */

static void cached_dev_write_complete(struct closure *cl)
{
        struct search *s = container_of(cl, struct search, cl);
        struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);

        up_read_non_owner(&dc->writeback_lock);
        cached_dev_bio_complete(cl);
}

static void cached_dev_write(struct cached_dev *dc, struct search *s)
{
        struct closure *cl = &s->cl;
        struct bio *bio = &s->bio.bio;
        struct bkey start = KEY(dc->disk.id, bio->bi_iter.bi_sector, 0);
        struct bkey end = KEY(dc->disk.id, bio_end_sector(bio), 0);

        bch_keybuf_check_overlapping(&s->iop.c->moving_gc_keys, &start, &end);

        down_read_non_owner(&dc->writeback_lock);
        if (bch_keybuf_check_overlapping(&dc->writeback_keys, &start, &end)) {
                /*
                 * We overlap with some dirty data undergoing background
                 * writeback, force this write to writeback
                 */
                s->iop.bypass = false;
                s->iop.writeback = true;
        }

        /*
         * Discards aren't _required_ to do anything, so skipping if
         * check_overlapping returned true is ok
         *
         * But check_overlapping drops dirty keys for which io hasn't started,
         * so we still want to call it.
         */
        if (bio_op(bio) == REQ_OP_DISCARD)
                s->iop.bypass = true;

        if (should_writeback(dc, s->orig_bio,
                             cache_mode(dc, bio),
                             s->iop.bypass)) {
                s->iop.bypass = false;
                s->iop.writeback = true;
        }

        if (s->iop.bypass) {
                s->iop.bio = s->orig_bio;
                bio_get(s->iop.bio);

                if ((bio_op(bio) != REQ_OP_DISCARD) ||
                    blk_queue_discard(bdev_get_queue(dc->bdev)))
                        closure_bio_submit(bio, cl);
        } else if (s->iop.writeback) {
                bch_writeback_add(dc);
                s->iop.bio = bio;

                if (bio->bi_opf & REQ_PREFLUSH) {
                        /* Also need to send a flush to the backing device */
                        struct bio *flush = bio_alloc_bioset(GFP_NOIO, 0,
                                                             dc->disk.bio_split);

                        bio_copy_dev(flush, bio);
                        flush->bi_end_io = request_endio;
                        flush->bi_private = cl;
                        flush->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;

                        closure_bio_submit(flush, cl);
                }
        } else {
                s->iop.bio = bio_clone_fast(bio, GFP_NOIO, dc->disk.bio_split);

                closure_bio_submit(bio, cl);
        }

        closure_call(&s->iop.cl, bch_data_insert, NULL, cl);
        continue_at(cl, cached_dev_write_complete, NULL);
}
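
/*
 * Summary of the write paths above (editorial note):
 *
 * - bypass: the cached region is invalidated via bch_data_insert() with
 *   op->bypass set, and the original bio goes straight to the backing
 *   device (discards only if the backing device supports them).
 * - writeback: the data is written only to the cache and marked dirty;
 *   a REQ_PREFLUSH is forwarded to the backing device as an empty flush
 *   bio since the data itself won't reach it yet.
 * - writethrough (neither flag set): the bio is cloned; the original is
 *   submitted to the backing device while the clone is inserted into
 *   the cache, and the request completes only after both finish.
 */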

static void cached_dev_nodata(struct closure *cl)
{
        struct search *s = container_of(cl, struct search, cl);
        struct bio *bio = &s->bio.bio;

        if (s->iop.flush_journal)
                bch_journal_meta(s->iop.c, cl);

        /* If it's a flush, we send the flush to the backing device too */
        closure_bio_submit(bio, cl);

        continue_at(cl, cached_dev_bio_complete, NULL);
}

/* Cached devices - read & write stuff */

static blk_qc_t cached_dev_make_request(struct request_queue *q,
                                        struct bio *bio)
{
        struct search *s;
        struct bcache_device *d = bio->bi_disk->private_data;
        struct cached_dev *dc = container_of(d, struct cached_dev, disk);
        int rw = bio_data_dir(bio);

        generic_start_io_acct(q, rw, bio_sectors(bio), &d->disk->part0);

        bio_set_dev(bio, dc->bdev);
        bio->bi_iter.bi_sector += dc->sb.data_offset;

        if (cached_dev_get(dc)) {
                s = search_alloc(bio, d);
                trace_bcache_request_start(s->d, bio);

                if (!bio->bi_iter.bi_size) {
                        /*
                         * can't call bch_journal_meta from under
                         * generic_make_request
                         */
                        continue_at_nobarrier(&s->cl,
                                              cached_dev_nodata,
                                              bcache_wq);
                } else {
                        s->iop.bypass = check_should_bypass(dc, bio);

                        if (rw)
                                cached_dev_write(dc, s);
                        else
                                cached_dev_read(dc, s);
                }
        } else {
                if ((bio_op(bio) == REQ_OP_DISCARD) &&
                    !blk_queue_discard(bdev_get_queue(dc->bdev)))
                        bio_endio(bio);
                else
                        generic_make_request(bio);
        }

        return BLK_QC_T_NONE;
}

static int cached_dev_ioctl(struct bcache_device *d, fmode_t mode,
                            unsigned int cmd, unsigned long arg)
{
        struct cached_dev *dc = container_of(d, struct cached_dev, disk);
        return __blkdev_driver_ioctl(dc->bdev, mode, cmd, arg);
}

static int cached_dev_congested(void *data, int bits)
{
        struct bcache_device *d = data;
        struct cached_dev *dc = container_of(d, struct cached_dev, disk);
        struct request_queue *q = bdev_get_queue(dc->bdev);
        int ret = 0;

        if (bdi_congested(q->backing_dev_info, bits))
                return 1;

        if (cached_dev_get(dc)) {
                unsigned i;
                struct cache *ca;

                for_each_cache(ca, d->c, i) {
                        q = bdev_get_queue(ca->bdev);
                        ret |= bdi_congested(q->backing_dev_info, bits);
                }

                cached_dev_put(dc);
        }

        return ret;
}

void bch_cached_dev_request_init(struct cached_dev *dc)
{
        struct gendisk *g = dc->disk.disk;

        g->queue->make_request_fn = cached_dev_make_request;
        g->queue->backing_dev_info->congested_fn = cached_dev_congested;
        dc->disk.cache_miss = cached_dev_cache_miss;
        dc->disk.ioctl = cached_dev_ioctl;
}

/* Flash backed devices */

static int flash_dev_cache_miss(struct btree *b, struct search *s,
                                struct bio *bio, unsigned sectors)
{
        unsigned bytes = min(sectors, bio_sectors(bio)) << 9;

        swap(bio->bi_iter.bi_size, bytes);
        zero_fill_bio(bio);
        swap(bio->bi_iter.bi_size, bytes);

        bio_advance(bio, bytes);

        if (!bio->bi_iter.bi_size)
                return MAP_DONE;

        return MAP_CONTINUE;
}
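
/*
 * Editorial note on the swap() dance above: zero_fill_bio() zeroes
 * bi_iter.bi_size bytes from the bio's current position, so temporarily
 * shrinking bi_size to just the hole being served zeroes exactly that
 * range; the second swap() restores the real size before bio_advance()
 * moves past the zeroed region.
 */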

static void flash_dev_nodata(struct closure *cl)
{
        struct search *s = container_of(cl, struct search, cl);

        if (s->iop.flush_journal)
                bch_journal_meta(s->iop.c, cl);

        continue_at(cl, search_free, NULL);
}

static blk_qc_t flash_dev_make_request(struct request_queue *q,
                                       struct bio *bio)
{
        struct search *s;
        struct closure *cl;
        struct bcache_device *d = bio->bi_disk->private_data;
        int rw = bio_data_dir(bio);

        generic_start_io_acct(q, rw, bio_sectors(bio), &d->disk->part0);

        s = search_alloc(bio, d);
        cl = &s->cl;
        bio = &s->bio.bio;

        trace_bcache_request_start(s->d, bio);

        if (!bio->bi_iter.bi_size) {
                /*
                 * can't call bch_journal_meta from under
                 * generic_make_request
                 */
                continue_at_nobarrier(&s->cl,
                                      flash_dev_nodata,
                                      bcache_wq);
                return BLK_QC_T_NONE;
        } else if (rw) {
                bch_keybuf_check_overlapping(&s->iop.c->moving_gc_keys,
                                &KEY(d->id, bio->bi_iter.bi_sector, 0),
                                &KEY(d->id, bio_end_sector(bio), 0));

                s->iop.bypass = (bio_op(bio) == REQ_OP_DISCARD) != 0;
                s->iop.writeback = true;
                s->iop.bio = bio;

                closure_call(&s->iop.cl, bch_data_insert, NULL, cl);
        } else {
                closure_call(&s->iop.cl, cache_lookup, NULL, cl);
        }

        continue_at(cl, search_free, NULL);
        return BLK_QC_T_NONE;
}

static int flash_dev_ioctl(struct bcache_device *d, fmode_t mode,
                           unsigned int cmd, unsigned long arg)
{
        return -ENOTTY;
}

static int flash_dev_congested(void *data, int bits)
{
        struct bcache_device *d = data;
        struct request_queue *q;
        struct cache *ca;
        unsigned i;
        int ret = 0;

        for_each_cache(ca, d->c, i) {
                q = bdev_get_queue(ca->bdev);
                ret |= bdi_congested(q->backing_dev_info, bits);
        }

        return ret;
}

void bch_flash_dev_request_init(struct bcache_device *d)
{
        struct gendisk *g = d->disk;

        g->queue->make_request_fn = flash_dev_make_request;
        g->queue->backing_dev_info->congested_fn = flash_dev_congested;
        d->cache_miss = flash_dev_cache_miss;
        d->ioctl = flash_dev_ioctl;
}

void bch_request_exit(void)
{
        if (bch_search_cache)
                kmem_cache_destroy(bch_search_cache);
}

int __init bch_request_init(void)
{
        bch_search_cache = KMEM_CACHE(search, 0);
        if (!bch_search_cache)
                return -ENOMEM;

        return 0;
}